From b3f1b3b4784406932bd815b78ac8f0dad0b65e7c Mon Sep 17 00:00:00 2001 From: baderbuddy Date: Wed, 4 Nov 2020 10:17:37 -0500 Subject: [PATCH] Recategorize the results into standard categories. (#434) * Initial checkin for recategorizing checks * Fix tests * Fix tests * Update example output --- CHANGELOG.md | 1 + checks/cpuLimitsMissing.yaml | 2 +- checks/cpuRequestsMissing.yaml | 2 +- checks/hostNetworkSet.yaml | 2 +- checks/hostPortSet.yaml | 2 +- checks/livenessProbeMissing.yaml | 2 +- checks/memoryLimitsMissing.yaml | 2 +- checks/memoryRequestsMissing.yaml | 2 +- checks/pullPolicyNotAlways.yaml | 2 +- checks/readinessProbeMissing.yaml | 2 +- checks/tagNotSpecified.yaml | 2 +- .../{resources.md => efficiency.md} | 10 +- docs/check-documentation/health-checks.md | 22 - docs/check-documentation/images.md | 19 - docs/check-documentation/networking.md | 22 - docs/check-documentation/reliability.md | 32 ++ docs/check-documentation/security.md | 12 +- docs/usage.md | 6 +- examples/config-full.yaml | 17 +- examples/config.yaml | 17 +- examples/output.json | 394 +++++++++++++++++- go.sum | 1 + pkg/dashboard/helpers.go | 26 +- pkg/validator/container_test.go | 46 +- pkg/validator/controller_test.go | 4 +- pkg/validator/pod_test.go | 10 +- pkg/validator/schema_test.go | 20 +- 27 files changed, 498 insertions(+), 181 deletions(-) rename docs/check-documentation/{resources.md => efficiency.md} (78%) delete mode 100644 docs/check-documentation/health-checks.md delete mode 100644 docs/check-documentation/images.md delete mode 100644 docs/check-documentation/networking.md create mode 100644 docs/check-documentation/reliability.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 83a332e45..14e93c5c4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ # x.x.x (next release) +* Standardize categories of checks into Security, Reliability, and Efficiency # 1.2.1 * Update date on dashboard footer diff --git a/checks/cpuLimitsMissing.yaml b/checks/cpuLimitsMissing.yaml index ccf1ddfe9..6601e03d2 100644 --- a/checks/cpuLimitsMissing.yaml +++ b/checks/cpuLimitsMissing.yaml @@ -1,6 +1,6 @@ successMessage: CPU limits are set failureMessage: CPU limits should be set -category: Resources +category: Efficiency target: Container containers: exclude: diff --git a/checks/cpuRequestsMissing.yaml b/checks/cpuRequestsMissing.yaml index 77290d8be..f63ffdd4b 100644 --- a/checks/cpuRequestsMissing.yaml +++ b/checks/cpuRequestsMissing.yaml @@ -1,6 +1,6 @@ successMessage: CPU requests are set failureMessage: CPU requests should be set -category: Resources +category: Efficiency target: Container containers: exclude: diff --git a/checks/hostNetworkSet.yaml b/checks/hostNetworkSet.yaml index d54419bfc..c85748743 100644 --- a/checks/hostNetworkSet.yaml +++ b/checks/hostNetworkSet.yaml @@ -1,6 +1,6 @@ successMessage: Host network is not configured failureMessage: Host network should not be configured -category: Networking +category: Security target: Pod schema: '$schema': http://json-schema.org/draft-07/schema diff --git a/checks/hostPortSet.yaml b/checks/hostPortSet.yaml index cd193ea64..ea4da62d7 100644 --- a/checks/hostPortSet.yaml +++ b/checks/hostPortSet.yaml @@ -1,6 +1,6 @@ successMessage: Host port is not configured failureMessage: Host port should not be configured -category: Networking +category: Security target: Container schema: '$schema': http://json-schema.org/draft-07/schema diff --git a/checks/livenessProbeMissing.yaml b/checks/livenessProbeMissing.yaml index a78273048..2456bd6ab 100644 --- 
a/checks/livenessProbeMissing.yaml +++ b/checks/livenessProbeMissing.yaml @@ -1,6 +1,6 @@ successMessage: Liveness probe is configured failureMessage: Liveness probe should be configured -category: Health Checks +category: Reliability controllers: exclude: - Job diff --git a/checks/memoryLimitsMissing.yaml b/checks/memoryLimitsMissing.yaml index f3c14524d..53ffb25b8 100644 --- a/checks/memoryLimitsMissing.yaml +++ b/checks/memoryLimitsMissing.yaml @@ -1,6 +1,6 @@ successMessage: Memory limits are set failureMessage: Memory limits should be set -category: Resources +category: Efficiency target: Container containers: exclude: diff --git a/checks/memoryRequestsMissing.yaml b/checks/memoryRequestsMissing.yaml index a3f917817..3fce46abc 100644 --- a/checks/memoryRequestsMissing.yaml +++ b/checks/memoryRequestsMissing.yaml @@ -1,6 +1,6 @@ successMessage: Memory requests are set failureMessage: Memory requests should be set -category: Resources +category: Efficiency target: Container containers: exclude: diff --git a/checks/pullPolicyNotAlways.yaml b/checks/pullPolicyNotAlways.yaml index 89e591f5f..25a50aac9 100644 --- a/checks/pullPolicyNotAlways.yaml +++ b/checks/pullPolicyNotAlways.yaml @@ -1,6 +1,6 @@ successMessage: Image pull policy is "Always" failureMessage: Image pull policy should be "Always" -category: Images +category: Reliability target: Container schema: '$schema': http://json-schema.org/draft-07/schema diff --git a/checks/readinessProbeMissing.yaml b/checks/readinessProbeMissing.yaml index 1309bab8b..c655e13cc 100644 --- a/checks/readinessProbeMissing.yaml +++ b/checks/readinessProbeMissing.yaml @@ -1,6 +1,6 @@ successMessage: Readiness probe is configured failureMessage: Readiness probe should be configured -category: Health Checks +category: Reliability controllers: exclude: - Job diff --git a/checks/tagNotSpecified.yaml b/checks/tagNotSpecified.yaml index 102d34c7e..29ff9e96d 100644 --- a/checks/tagNotSpecified.yaml +++ b/checks/tagNotSpecified.yaml @@ -1,6 +1,6 @@ successMessage: Image tag is specified failureMessage: Image tag should be specified -category: Images +category: Reliability target: Container schema: '$schema': http://json-schema.org/draft-07/schema diff --git a/docs/check-documentation/resources.md b/docs/check-documentation/efficiency.md similarity index 78% rename from docs/check-documentation/resources.md rename to docs/check-documentation/efficiency.md index 60e21d706..55cfd8249 100644 --- a/docs/check-documentation/resources.md +++ b/docs/check-documentation/efficiency.md @@ -1,4 +1,4 @@ -# Resources +# Efficiency Polaris supports a number of checks related to CPU and Memory requests and limits. @@ -8,10 +8,10 @@ To simplify ensure that these values have been set, the following attributes are key | default | description ----|---------|------------ -`resources.cpuRequestsMissing` | `danger` | Fails when `resources.requests.cpu` attribute is not configured. -`resources.memoryRequestsMissing` | `danger` | Fails when `resources.requests.memory` attribute is not configured. -`resources.cpuLimitsMissing` | `danger` | Fails when `resources.limits.cpu` attribute is not configured. -`resources.memoryLimitsMissing` | `danger` | Fails when `resources.limits.memory` attribute is not configured. +`resources.cpuRequestsMissing` | `warning` | Fails when `resources.requests.cpu` attribute is not configured. +`resources.memoryRequestsMissing` | `warning` | Fails when `resources.requests.memory` attribute is not configured. 
+`resources.cpuLimitsMissing` | `warning` | Fails when `resources.limits.cpu` attribute is not configured. +`resources.memoryLimitsMissing` | `warning` | Fails when `resources.limits.memory` attribute is not configured. ## Background diff --git a/docs/check-documentation/health-checks.md b/docs/check-documentation/health-checks.md deleted file mode 100644 index 6fef39844..000000000 --- a/docs/check-documentation/health-checks.md +++ /dev/null @@ -1,22 +0,0 @@ -# Health Checks - -Polaris supports validating the presence of readiness and liveness probes in pods. - -key | default | description -----|---------|------------ -`healthChecks.readinessProbeMissing` | `warning` | Fails when a readiness probe is not configured for a pod. -`healthChecks.livenessProbeMissing` | `warning` | Fails when a liveness probe is not configured for a pod. - -## Background - -Readiness and liveness probes can help maintain the health of applications running inside Kubernetes. By default, Kubernetes only knows whether or not a process is running, not if it's healthy. Properly configured readiness and liveness probes will also be able to ensure the health of an application. - -Readiness probes are designed to ensure that an application has reached a "ready" state. In many cases there is a period of time between when a webserver process starts and when it is ready to receive traffic. A readiness probe can ensure the traffic is not sent to a pod until it is actually ready to receive traffic. - -Liveness probes are designed to ensure that an application stays in a healthy state. When a liveness probe fails, the pod will be restarted. - -## Further Reading - -- [Kubernetes Docs: Configure Liveness and Readiness Probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) -- [Utilizing Kubernetes Liveness and Readiness Probes to Automatically Recover From Failure](https://medium.com/spire-labs/utilizing-kubernetes-liveness-and-readiness-probes-to-automatically-recover-from-failure-2fe0314f2b2e) -- [Kubernetes Liveness and Readiness Probes: How to Avoid Shooting Yourself in the Foot](https://blog.colinbreck.com/kubernetes-liveness-and-readiness-probes-how-to-avoid-shooting-yourself-in-the-foot/) diff --git a/docs/check-documentation/images.md b/docs/check-documentation/images.md deleted file mode 100644 index ab416f9cb..000000000 --- a/docs/check-documentation/images.md +++ /dev/null @@ -1,19 +0,0 @@ -# Images - -Polaris supports a number of checks related to the image specified by pods. - -key | default | description -----|---------|------------ -`images.tagNotSpecified` | `danger` | Fails when an image tag is either not specified or `latest`. -`images.pullPolicyNotAlways` | `warning` | Fails when an image pull policy is not `always`. - -## Background - -Docker's `latest` tag is applied by default to images where a tag hasn't been specified. Not specifying a specific version of an image can lead to a wide variety of problems. The underlying image could include unexpected breaking changes that break your application whenever the latest image is pulled. Reusing the same tag for multiple versions of an image can lead to different nodes in the same cluster having different versions of an image, even if the tag is identical. - -Related to that, relying on cached versions of a Docker image can become a security vulnerability. By default, an image will be pulled if it isn't already cached on the node attempting to run it. 
This can result in variations in images that are running per node, or potentially provide a way to gain access to an image without having direct access to the ImagePullSecret. With that in mind, it's often better to ensure the a pod has `pullPolicy: Always` specified, so images are always pulled directly from their source. - -## Further Reading - -- [What's Wrong With The Docker :latest Tag?](https://vsupalov.com/docker-latest-tag/) -- [Kubernetes’ AlwaysPullImages Admission Control — the Importance, Implementation, and Security Vulnerability in its Absence](https://medium.com/@trstringer/kubernetes-alwayspullimages-admission-control-the-importance-implementation-and-security-d83ff3815840) diff --git a/docs/check-documentation/networking.md b/docs/check-documentation/networking.md deleted file mode 100644 index bc0df9633..000000000 --- a/docs/check-documentation/networking.md +++ /dev/null @@ -1,22 +0,0 @@ -# Networking - -Polaris supports a number of checks related to pod networking. - -key | default | description -----|---------|------------ -`networking.hostNetworkSet` | `warning` | Fails when `hostNetwork` attribute is configured. -`networking.hostPortSet` | `warning` | Fails when `hostPort` attribute is configured. - - -## Background - -Although Kubernetes allows you to deploy a pod with access to the host network namespace, it's rarely a good idea. A pod running with the `hostNetwork` attribute enabled will have access to the loopback device, services listening on localhost, and could be used to snoop on network activity of other pods on the same node. There are certain examples where setting `hostNetwork` to true is required, such as deploying a networking plugin like Flannel. - -Setting the `hostPort` attribute on a container will ensure that it is accessible on that specific port on each node it is deployed to. Unfortunately when this is specified, it limits where a pod can actually be scheduled in a cluster. - - -## Further Reading - -- [Kubernetes Docs: Configuration Best Practices](https://kubernetes.io/docs/concepts/configuration/overview/#services) - -- [Accessing Kubernetes Pods from Outside of the Cluster](http://alesnosek.com/blog/2017/02/14/accessing-kubernetes-pods-from-outside-of-the-cluster/) diff --git a/docs/check-documentation/reliability.md b/docs/check-documentation/reliability.md new file mode 100644 index 000000000..a180886a7 --- /dev/null +++ b/docs/check-documentation/reliability.md @@ -0,0 +1,32 @@ +# Reliability + +Polaris supports a number of checks related to keeping workloads running in a reliable and consistent manner. + +key | default | description +----|---------|------------ +`reliability.readinessProbeMissing` | `warning` | Fails when a readiness probe is not configured for a pod. +`reliability.livenessProbeMissing` | `warning` | Fails when a liveness probe is not configured for a pod. +`reliability.tagNotSpecified` | `danger` | Fails when an image tag is either not specified or `latest`. +`reliability.pullPolicyNotAlways` | `warning` | Fails when an image pull policy is not `always`. +`reliability.priorityClassNotSet` | `ignore` | Fails when a priorityClassName is not set for a pod. +`reliability.multipleReplicasForDeployment` | `ignore` | Fails when there is only one replica for a deployment. + +## Background + +Readiness and liveness probes can help maintain the health of applications running inside Kubernetes. By default, Kubernetes only knows whether or not a process is running, not if it's healthy.
Properly configured readiness and liveness probes will also be able to ensure the health of an application. + +Readiness probes are designed to ensure that an application has reached a "ready" state. In many cases there is a period of time between when a webserver process starts and when it is ready to receive traffic. A readiness probe can ensure the traffic is not sent to a pod until it is actually ready to receive traffic. + +Liveness probes are designed to ensure that an application stays in a healthy state. When a liveness probe fails, the pod will be restarted. + +Docker's `latest` tag is applied by default to images where a tag hasn't been specified. Not specifying a specific version of an image can lead to a wide variety of problems. The underlying image could include unexpected breaking changes that break your application whenever the latest image is pulled. Reusing the same tag for multiple versions of an image can lead to different nodes in the same cluster having different versions of an image, even if the tag is identical. + +Related to that, relying on cached versions of a Docker image can become a security vulnerability. By default, an image will be pulled if it isn't already cached on the node attempting to run it. This can result in variations in images that are running per node, or potentially provide a way to gain access to an image without having direct access to the ImagePullSecret. With that in mind, it's often better to ensure that a pod has `pullPolicy: Always` specified, so images are always pulled directly from their source. + +## Further Reading + +- [What's Wrong With The Docker :latest Tag?](https://vsupalov.com/docker-latest-tag/) +- [Kubernetes’ AlwaysPullImages Admission Control — the Importance, Implementation, and Security Vulnerability in its Absence](https://medium.com/@trstringer/kubernetes-alwayspullimages-admission-control-the-importance-implementation-and-security-d83ff3815840) +- [Kubernetes Docs: Configure Liveness and Readiness Probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) +- [Utilizing Kubernetes Liveness and Readiness Probes to Automatically Recover From Failure](https://medium.com/spire-labs/utilizing-kubernetes-liveness-and-readiness-probes-to-automatically-recover-from-failure-2fe0314f2b2e) +- [Kubernetes Liveness and Readiness Probes: How to Avoid Shooting Yourself in the Foot](https://blog.colinbreck.com/kubernetes-liveness-and-readiness-probes-how-to-avoid-shooting-yourself-in-the-foot/) diff --git a/docs/check-documentation/security.md b/docs/check-documentation/security.md index 7b049e343..6a4bbdc41 100644 --- a/docs/check-documentation/security.md +++ b/docs/check-documentation/security.md @@ -8,14 +8,20 @@ key | default | description `security.hostPIDSet` | `danger` | Fails when `hostPID` attribute is configured. `security.notReadOnlyRootFilesystem` | `warning` | Fails when `securityContext.readOnlyRootFilesystem` is not true. `security.privilegeEscalationAllowed` | `danger` | Fails when `securityContext.allowPrivilegeEscalation` is true. -`security.runAsRootAllowed` | `danger` | Fails when `securityContext.runAsNonRoot` is not true. +`security.runAsRootAllowed` | `warning` | Fails when `securityContext.runAsNonRoot` is not true. `security.runAsPrivileged` | `danger` | Fails when `securityContext.privileged` is true.
`security.insecureCapabilities` | `warning` | Fails when `securityContext.capabilities` includes one of the capabilities [listed here](/checks/insecureCapabilities.yaml) `security.dangerousCapabilities` | `danger` | Fails when `securityContext.capabilities` includes one of the capabilities [listed here](/checks/dangerousCapabilities.yaml) +`security.hostNetworkSet` | `warning` | Fails when `hostNetwork` attribute is configured. +`security.hostPortSet` | `warning` | Fails when `hostPort` attribute is configured. ## Background -Securing workloads in Kubernetes is an important part of overall cluster security. The overall goal should be to ensure that containers are running with as minimal privileges as possible. This includes avoiding privilege escalation, not running containers with a root user, and using read only file systems wherever possible. +Securing workloads in Kubernetes is an important part of overall cluster security. The overall goal should be to ensure that containers are running with as minimal privileges as possible. This includes avoiding privilege escalation, not running containers with a root user, not giving excessive access to the host network, and using read only file systems wherever possible. + +A pod running with the `hostNetwork` attribute enabled will have access to the loopback device, services listening on localhost, and could be used to snoop on network activity of other pods on the same node. There are certain examples where setting `hostNetwork` to true is required, such as deploying a networking plugin like Flannel. + +Setting the `hostPort` attribute on a container will ensure that it is accessible on that specific port on each node it is deployed to. Unfortunately when this is specified, it limits where a pod can actually be scheduled in a cluster. Much of this configuration can be found in the `securityContext` attribute for both Kubernetes pods and containers. Where configuration is available at both a pod and container level, Polaris validates both. @@ -25,3 +31,5 @@ Much of this configuration can be found in the `securityContext` attribute for b - [Kubernetes Security Book](https://kubernetes-security.info/) - [Kubernetes Docs: Set capabilities for a Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container) - [Linux Programmer's Manual: Capabilities](http://man7.org/linux/man-pages/man7/capabilities.7.html) +- [Kubernetes Docs: Configuration Best Practices](https://kubernetes.io/docs/concepts/configuration/overview/#services) +- [Accessing Kubernetes Pods from Outside of the Cluster](http://alesnosek.com/blog/2017/02/14/accessing-kubernetes-pods-from-outside-of-the-cluster/) diff --git a/docs/usage.md b/docs/usage.md index 61ff76e88..581ac9a23 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -14,11 +14,9 @@ Each check can be assigned a `severity`. Only checks with a severity of `danger` Polaris validation checks fall into several different categories: -- [Health Checks](check-documentation/health-checks.md) -- [Images](check-documentation/images.md) -- [Networking](check-documentation/networking.md) -- [Resources](check-documentation/resources.md) - [Security](check-documentation/security.md) +- [Reliability](check-documentation/reliability.md) +- [Efficiency](check-documentation/efficiency.md) #### Custom Checks If you'd like to create your own checks, you can use [JSON Schema](https://json-schema.org/). 
For example, diff --git a/examples/config-full.yaml b/examples/config-full.yaml index c4553d0d1..fa4892beb 100644 --- a/examples/config-full.yaml +++ b/examples/config-full.yaml @@ -2,20 +2,15 @@ checks: # reliability multipleReplicasForDeployment: warning priorityClassNotSet: warning - # resources - cpuRequestsMissing: warning - cpuLimitsMissing: warning - memoryRequestsMissing: warning - memoryLimitsMissing: warning - # images tagNotSpecified: danger pullPolicyNotAlways: warning - # healthChecks readinessProbeMissing: warning livenessProbeMissing: warning - # networking - hostNetworkSet: warning - hostPortSet: warning + # efficiency + cpuRequestsMissing: warning + cpuLimitsMissing: warning + memoryRequestsMissing: warning + memoryLimitsMissing: warning # security hostIPCSet: danger hostPIDSet: danger @@ -25,6 +20,8 @@ checks: runAsPrivileged: danger dangerousCapabilities: danger insecureCapabilities: warning + hostNetworkSet: warning + hostPortSet: warning # custom resourceLimits: warning imageRegistry: danger diff --git a/examples/config.yaml b/examples/config.yaml index 403fa3556..586c0fd83 100644 --- a/examples/config.yaml +++ b/examples/config.yaml @@ -2,20 +2,15 @@ checks: # reliability multipleReplicasForDeployment: ignore priorityClassNotSet: ignore - # resources - cpuRequestsMissing: warning - cpuLimitsMissing: warning - memoryRequestsMissing: warning - memoryLimitsMissing: warning - # images tagNotSpecified: danger pullPolicyNotAlways: warning - # healthChecks readinessProbeMissing: warning livenessProbeMissing: warning - # networking - hostNetworkSet: warning - hostPortSet: warning + # efficiency + cpuRequestsMissing: warning + cpuLimitsMissing: warning + memoryRequestsMissing: warning + memoryLimitsMissing: warning # security hostIPCSet: danger hostPIDSet: danger @@ -25,6 +20,8 @@ checks: runAsPrivileged: danger dangerousCapabilities: danger insecureCapabilities: warning + hostNetworkSet: warning + hostPortSet: warning exemptions: - namespace: kube-system diff --git a/examples/output.json b/examples/output.json index beca1dab4..21343700f 100644 --- a/examples/output.json +++ b/examples/output.json @@ -7,24 +7,19 @@ "ClusterInfo": { "Version": "unknown", "Nodes": 0, - "Pods": 0, - "Namespaces": 2, - "Deployments": 2, - "StatefulSets": 0, - "DaemonSets": 0, - "Jobs": 0, - "CronJobs": 0, - "ReplicationControllers": 0 + "Pods": 5, + "Namespaces": 3, + "Controllers": 5 }, "Results": [ { "Name": "polaris-dashboard", "Namespace": "polaris", "Kind": "Deployment", - "Messages": {}, + "Results": {}, "PodResult": { "Name": "", - "Messages": { + "Results": { "hostIPCSet": { "ID": "hostIPCSet", "Message": "Host IPC is not configured", @@ -50,7 +45,7 @@ "ContainerResults": [ { "Name": "dashboard", - "Messages": { + "Results": { "cpuLimitsMissing": { "ID": "cpuLimitsMissing", "Message": "CPU limits are set", @@ -145,16 +140,17 @@ } } ] - } + }, + "CreatedTime": "0001-01-01T00:00:00Z" }, { "Name": "polaris-webhook", "Namespace": "polaris", "Kind": "Deployment", - "Messages": {}, + "Results": {}, "PodResult": { "Name": "", - "Messages": { + "Results": { "hostIPCSet": { "ID": "hostIPCSet", "Message": "Host IPC is not configured", @@ -180,7 +176,7 @@ "ContainerResults": [ { "Name": "webhook", - "Messages": { + "Results": { "cpuLimitsMissing": { "ID": "cpuLimitsMissing", "Message": "CPU limits are set", @@ -275,7 +271,373 @@ } } ] - } + }, + "CreatedTime": "0001-01-01T00:00:00Z" + }, + { + "Name": "polaris-certificate-updater", + "Namespace": "polaris", + "Kind": "Job", + "Results": {}, + 
"PodResult": { + "Name": "", + "Results": { + "hostIPCSet": { + "ID": "hostIPCSet", + "Message": "Host IPC is not configured", + "Success": true, + "Severity": "danger", + "Category": "Security" + }, + "hostNetworkSet": { + "ID": "hostNetworkSet", + "Message": "Host network is not configured", + "Success": true, + "Severity": "warning", + "Category": "Networking" + }, + "hostPIDSet": { + "ID": "hostPIDSet", + "Message": "Host PID is not configured", + "Success": true, + "Severity": "danger", + "Category": "Security" + } + }, + "ContainerResults": [ + { + "Name": "webhook-certificate-generator", + "Results": { + "cpuLimitsMissing": { + "ID": "cpuLimitsMissing", + "Message": "CPU limits are set", + "Success": true, + "Severity": "warning", + "Category": "Resources" + }, + "cpuRequestsMissing": { + "ID": "cpuRequestsMissing", + "Message": "CPU requests are set", + "Success": true, + "Severity": "warning", + "Category": "Resources" + }, + "dangerousCapabilities": { + "ID": "dangerousCapabilities", + "Message": "Container does not have any dangerous capabilities", + "Success": true, + "Severity": "danger", + "Category": "Security" + }, + "hostPortSet": { + "ID": "hostPortSet", + "Message": "Host port is not configured", + "Success": true, + "Severity": "warning", + "Category": "Networking" + }, + "insecureCapabilities": { + "ID": "insecureCapabilities", + "Message": "Container does not have any insecure capabilities", + "Success": true, + "Severity": "warning", + "Category": "Security" + }, + "memoryLimitsMissing": { + "ID": "memoryLimitsMissing", + "Message": "Memory limits are set", + "Success": true, + "Severity": "warning", + "Category": "Resources" + }, + "memoryRequestsMissing": { + "ID": "memoryRequestsMissing", + "Message": "Memory requests are set", + "Success": true, + "Severity": "warning", + "Category": "Resources" + }, + "privilegeEscalationAllowed": { + "ID": "privilegeEscalationAllowed", + "Message": "Privilege escalation not allowed", + "Success": true, + "Severity": "danger", + "Category": "Security" + }, + "runAsPrivileged": { + "ID": "runAsPrivileged", + "Message": "Not running as privileged", + "Success": true, + "Severity": "danger", + "Category": "Security" + }, + "runAsRootAllowed": { + "ID": "runAsRootAllowed", + "Message": "Is not allowed to run as root", + "Success": true, + "Severity": "warning", + "Category": "Security" + }, + "tagNotSpecified": { + "ID": "tagNotSpecified", + "Message": "Image tag is specified", + "Success": true, + "Severity": "danger", + "Category": "Images" + } + } + } + ] + }, + "CreatedTime": "0001-01-01T00:00:00Z" + }, + { + "Name": "polaris-webhook", + "Namespace": "polaris", + "Kind": "Deployment", + "Results": {}, + "PodResult": { + "Name": "", + "Results": { + "hostIPCSet": { + "ID": "hostIPCSet", + "Message": "Host IPC is not configured", + "Success": true, + "Severity": "danger", + "Category": "Security" + }, + "hostNetworkSet": { + "ID": "hostNetworkSet", + "Message": "Host network is not configured", + "Success": true, + "Severity": "warning", + "Category": "Networking" + }, + "hostPIDSet": { + "ID": "hostPIDSet", + "Message": "Host PID is not configured", + "Success": true, + "Severity": "danger", + "Category": "Security" + } + }, + "ContainerResults": [ + { + "Name": "webhook", + "Results": { + "cpuLimitsMissing": { + "ID": "cpuLimitsMissing", + "Message": "CPU limits are set", + "Success": true, + "Severity": "warning", + "Category": "Resources" + }, + "cpuRequestsMissing": { + "ID": "cpuRequestsMissing", + "Message": "CPU requests 
are set", + "Success": true, + "Severity": "warning", + "Category": "Resources" + }, + "dangerousCapabilities": { + "ID": "dangerousCapabilities", + "Message": "Container does not have any dangerous capabilities", + "Success": true, + "Severity": "danger", + "Category": "Security" + }, + "hostPortSet": { + "ID": "hostPortSet", + "Message": "Host port is not configured", + "Success": true, + "Severity": "warning", + "Category": "Networking" + }, + "insecureCapabilities": { + "ID": "insecureCapabilities", + "Message": "Container does not have any insecure capabilities", + "Success": true, + "Severity": "warning", + "Category": "Security" + }, + "livenessProbeMissing": { + "ID": "livenessProbeMissing", + "Message": "Liveness probe is configured", + "Success": true, + "Severity": "warning", + "Category": "Health Checks" + }, + "memoryLimitsMissing": { + "ID": "memoryLimitsMissing", + "Message": "Memory limits are set", + "Success": true, + "Severity": "warning", + "Category": "Resources" + }, + "memoryRequestsMissing": { + "ID": "memoryRequestsMissing", + "Message": "Memory requests are set", + "Success": true, + "Severity": "warning", + "Category": "Resources" + }, + "privilegeEscalationAllowed": { + "ID": "privilegeEscalationAllowed", + "Message": "Privilege escalation not allowed", + "Success": true, + "Severity": "danger", + "Category": "Security" + }, + "readinessProbeMissing": { + "ID": "readinessProbeMissing", + "Message": "Readiness probe is configured", + "Success": true, + "Severity": "warning", + "Category": "Health Checks" + }, + "runAsPrivileged": { + "ID": "runAsPrivileged", + "Message": "Not running as privileged", + "Success": true, + "Severity": "danger", + "Category": "Security" + }, + "runAsRootAllowed": { + "ID": "runAsRootAllowed", + "Message": "Is not allowed to run as root", + "Success": true, + "Severity": "warning", + "Category": "Security" + }, + "tagNotSpecified": { + "ID": "tagNotSpecified", + "Message": "Image tag is specified", + "Success": true, + "Severity": "danger", + "Category": "Images" + } + } + } + ] + }, + "CreatedTime": "0001-01-01T00:00:00Z" + }, + { + "Name": "polaris-certificate-updater", + "Namespace": "polaris", + "Kind": "Job", + "Results": {}, + "PodResult": { + "Name": "", + "Results": { + "hostIPCSet": { + "ID": "hostIPCSet", + "Message": "Host IPC is not configured", + "Success": true, + "Severity": "danger", + "Category": "Security" + }, + "hostNetworkSet": { + "ID": "hostNetworkSet", + "Message": "Host network is not configured", + "Success": true, + "Severity": "warning", + "Category": "Networking" + }, + "hostPIDSet": { + "ID": "hostPIDSet", + "Message": "Host PID is not configured", + "Success": true, + "Severity": "danger", + "Category": "Security" + } + }, + "ContainerResults": [ + { + "Name": "webhook-certificate-generator", + "Results": { + "cpuLimitsMissing": { + "ID": "cpuLimitsMissing", + "Message": "CPU limits are set", + "Success": true, + "Severity": "warning", + "Category": "Resources" + }, + "cpuRequestsMissing": { + "ID": "cpuRequestsMissing", + "Message": "CPU requests are set", + "Success": true, + "Severity": "warning", + "Category": "Resources" + }, + "dangerousCapabilities": { + "ID": "dangerousCapabilities", + "Message": "Container does not have any dangerous capabilities", + "Success": true, + "Severity": "danger", + "Category": "Security" + }, + "hostPortSet": { + "ID": "hostPortSet", + "Message": "Host port is not configured", + "Success": true, + "Severity": "warning", + "Category": "Networking" + }, + 
"insecureCapabilities": { + "ID": "insecureCapabilities", + "Message": "Container does not have any insecure capabilities", + "Success": true, + "Severity": "warning", + "Category": "Security" + }, + "memoryLimitsMissing": { + "ID": "memoryLimitsMissing", + "Message": "Memory limits are set", + "Success": true, + "Severity": "warning", + "Category": "Resources" + }, + "memoryRequestsMissing": { + "ID": "memoryRequestsMissing", + "Message": "Memory requests are set", + "Success": true, + "Severity": "warning", + "Category": "Resources" + }, + "privilegeEscalationAllowed": { + "ID": "privilegeEscalationAllowed", + "Message": "Privilege escalation not allowed", + "Success": true, + "Severity": "danger", + "Category": "Security" + }, + "runAsPrivileged": { + "ID": "runAsPrivileged", + "Message": "Not running as privileged", + "Success": true, + "Severity": "danger", + "Category": "Security" + }, + "runAsRootAllowed": { + "ID": "runAsRootAllowed", + "Message": "Is not allowed to run as root", + "Success": true, + "Severity": "warning", + "Category": "Security" + }, + "tagNotSpecified": { + "ID": "tagNotSpecified", + "Message": "Image tag is specified", + "Success": true, + "Severity": "danger", + "Category": "Images" + } + } + } + ] + }, + "CreatedTime": "0001-01-01T00:00:00Z" } ] } \ No newline at end of file diff --git a/go.sum b/go.sum index 48cdfccdd..3593b8088 100644 --- a/go.sum +++ b/go.sum @@ -554,6 +554,7 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= diff --git a/pkg/dashboard/helpers.go b/pkg/dashboard/helpers.go index 4809a7acd..b96fa642e 100644 --- a/pkg/dashboard/helpers.go +++ b/pkg/dashboard/helpers.go @@ -127,29 +127,13 @@ func getCategoryLink(category string) string { func getCategoryInfo(category string) string { switch category { - case "Health Checks": + case "Reliability": return fmt.Sprintf(` - Properly configured health checks can ensure the long term availability - and reliability of your application running in Kubernetes. Polaris - validates that health checks are configured for each pod running in - your cluster. + Kubernetes is built to reliabily run highly available applications. + Polaris includes a number of checks to ensure that you are maximizing + the reliability potential of Kubernetes. `) - case "Images": - return fmt.Sprintf(` - Images are the backbone of any Kubernetes cluster, containing the applications - that run in each container. Polaris validates that images are configured with - specific tags instead of just pulling the latest image on each run. This is - important for the stability and security of your workloads. - `) - case "Networking": - return fmt.Sprintf(` - Networking configuration in Kubernetes can be quite powerful. Polaris - validates that pods are not configured to have access to sensitive host - networking configuration. 
There are certain use cases such as a container - overlay network like Calico, where this level of access is required, but - the majority of workloads running on Kubernetes should not need this. - `) - case "Resources": + case "Efficiency": return fmt.Sprintf(` Configuring resource requests and limits for workloads running in Kubernetes helps ensure that every container will have access to all the resources it diff --git a/pkg/validator/container_test.go b/pkg/validator/container_test.go index 9b87f65d6..4ab651b1d 100644 --- a/pkg/validator/container_test.go +++ b/pkg/validator/container_test.go @@ -108,14 +108,14 @@ func TestValidateResourcesEmptyContainer(t *testing.T) { Success: false, Severity: "warning", Message: "CPU requests should be set", - Category: "Resources", + Category: "Efficiency", }, { ID: "memoryRequestsMissing", Success: false, Severity: "warning", Message: "Memory requests should be set", - Category: "Resources", + Category: "Efficiency", }, } @@ -125,14 +125,14 @@ func TestValidateResourcesEmptyContainer(t *testing.T) { Success: false, Severity: "danger", Message: "CPU limits should be set", - Category: "Resources", + Category: "Efficiency", }, { ID: "memoryLimitsMissing", Success: false, Severity: "danger", Message: "Memory limits should be set", - Category: "Resources", + Category: "Efficiency", }, } @@ -162,8 +162,8 @@ func TestValidateHealthChecks(t *testing.T) { ReadinessProbe: &probe, } - l := ResultMessage{ID: "livenessProbeMissing", Success: false, Severity: "warning", Message: "Liveness probe should be configured", Category: "Health Checks"} - r := ResultMessage{ID: "readinessProbeMissing", Success: false, Severity: "danger", Message: "Readiness probe should be configured", Category: "Health Checks"} + l := ResultMessage{ID: "livenessProbeMissing", Success: false, Severity: "warning", Message: "Liveness probe should be configured", Category: "Reliability"} + r := ResultMessage{ID: "readinessProbeMissing", Success: false, Severity: "danger", Message: "Readiness probe should be configured", Category: "Reliability"} f1 := []ResultMessage{} f2 := []ResultMessage{r} w1 := []ResultMessage{l} @@ -245,7 +245,7 @@ func TestValidateImage(t *testing.T) { Message: "Image tag should be specified", Success: false, Severity: "danger", - Category: "Images", + Category: "Reliability", }}, }, { @@ -257,7 +257,7 @@ func TestValidateImage(t *testing.T) { Message: "Image tag should be specified", Success: false, Severity: "danger", - Category: "Images", + Category: "Reliability", }}, }, { @@ -269,7 +269,7 @@ func TestValidateImage(t *testing.T) { Message: "Image tag should be specified", Success: false, Severity: "danger", - Category: "Images", + Category: "Reliability", }}, }, { @@ -281,13 +281,13 @@ func TestValidateImage(t *testing.T) { Message: "Image pull policy should be \"Always\"", Success: false, Severity: "danger", - Category: "Images", + Category: "Reliability", }, { ID: "tagNotSpecified", Message: "Image tag should be specified", Success: false, Severity: "danger", - Category: "Images", + Category: "Reliability", }}, }, { @@ -356,7 +356,7 @@ func TestValidateNetworking(t *testing.T) { Message: "Host port is not configured", Success: true, Severity: "warning", - Category: "Networking", + Category: "Security", }}, }, { @@ -368,7 +368,7 @@ func TestValidateNetworking(t *testing.T) { Message: "Host port is not configured", Success: true, Severity: "warning", - Category: "Networking", + Category: "Security", }}, }, { @@ -386,7 +386,7 @@ func TestValidateNetworking(t 
*testing.T) { Message: "Host port should not be configured", Success: false, Severity: "warning", - Category: "Networking", + Category: "Security", }}, }, { @@ -398,7 +398,7 @@ func TestValidateNetworking(t *testing.T) { Message: "Host port is not configured", Success: true, Severity: "warning", - Category: "Networking", + Category: "Security", }}, }, { @@ -410,7 +410,7 @@ func TestValidateNetworking(t *testing.T) { Message: "Host port should not be configured", Success: false, Severity: "danger", - Category: "Networking", + Category: "Security", }}, }, } @@ -1101,14 +1101,14 @@ func TestValidateResourcesExemption(t *testing.T) { Success: false, Severity: "warning", Message: "CPU requests should be set", - Category: "Resources", + Category: "Efficiency", }, { ID: "memoryRequestsMissing", Success: false, Severity: "warning", Message: "Memory requests should be set", - Category: "Resources", + Category: "Efficiency", }, } @@ -1118,14 +1118,14 @@ func TestValidateResourcesExemption(t *testing.T) { Success: false, Severity: "danger", Message: "CPU limits should be set", - Category: "Resources", + Category: "Efficiency", }, { ID: "memoryLimitsMissing", Success: false, Severity: "danger", Message: "Memory limits should be set", - Category: "Resources", + Category: "Efficiency", }, } @@ -1145,7 +1145,7 @@ func TestValidateResourcesEmptyContainerCPURequestsExempt(t *testing.T) { Success: false, Severity: "warning", Message: "Memory requests should be set", - Category: "Resources", + Category: "Efficiency", }, } @@ -1155,14 +1155,14 @@ func TestValidateResourcesEmptyContainerCPURequestsExempt(t *testing.T) { Success: false, Severity: "danger", Message: "CPU limits should be set", - Category: "Resources", + Category: "Efficiency", }, { ID: "memoryLimitsMissing", Success: false, Severity: "danger", Message: "Memory limits should be set", - Category: "Resources", + Category: "Efficiency", }, } diff --git a/pkg/validator/controller_test.go b/pkg/validator/controller_test.go index 11193e202..8bfed5051 100644 --- a/pkg/validator/controller_test.go +++ b/pkg/validator/controller_test.go @@ -114,8 +114,8 @@ func TestSkipHealthChecks(t *testing.T) { Dangers: uint(1), } expectedResults := ResultSet{ - "readinessProbeMissing": {ID: "readinessProbeMissing", Message: "Readiness probe should be configured", Success: false, Severity: "danger", Category: "Health Checks"}, - "livenessProbeMissing": {ID: "livenessProbeMissing", Message: "Liveness probe should be configured", Success: false, Severity: "warning", Category: "Health Checks"}, + "readinessProbeMissing": {ID: "readinessProbeMissing", Message: "Readiness probe should be configured", Success: false, Severity: "danger", Category: "Reliability"}, + "livenessProbeMissing": {ID: "livenessProbeMissing", Message: "Liveness probe should be configured", Success: false, Severity: "warning", Category: "Reliability"}, } actualResult, err := ValidateController(context.Background(), &c, deployment) if err != nil { diff --git a/pkg/validator/pod_test.go b/pkg/validator/pod_test.go index ef9f52534..bfbc22abb 100644 --- a/pkg/validator/pod_test.go +++ b/pkg/validator/pod_test.go @@ -49,7 +49,7 @@ func TestValidatePod(t *testing.T) { expectedResults := ResultSet{ "hostIPCSet": {ID: "hostIPCSet", Message: "Host IPC is not configured", Success: true, Severity: "danger", Category: "Security"}, - "hostNetworkSet": {ID: "hostNetworkSet", Message: "Host network is not configured", Success: true, Severity: "warning", Category: "Networking"}, + "hostNetworkSet": {ID: 
"hostNetworkSet", Message: "Host network is not configured", Success: true, Severity: "warning", Category: "Security"}, "hostPIDSet": {ID: "hostPIDSet", Message: "Host PID is not configured", Success: true, Severity: "danger", Category: "Security"}, } @@ -86,7 +86,7 @@ func TestInvalidIPCPod(t *testing.T) { } expectedResults := ResultSet{ "hostIPCSet": {ID: "hostIPCSet", Message: "Host IPC should not be configured", Success: false, Severity: "danger", Category: "Security"}, - "hostNetworkSet": {ID: "hostNetworkSet", Message: "Host network is not configured", Success: true, Severity: "warning", Category: "Networking"}, + "hostNetworkSet": {ID: "hostNetworkSet", Message: "Host network is not configured", Success: true, Severity: "warning", Category: "Security"}, "hostPIDSet": {ID: "hostPIDSet", Message: "Host PID is not configured", Success: true, Severity: "danger", Category: "Security"}, } @@ -123,7 +123,7 @@ func TestInvalidNeworkPod(t *testing.T) { } expectedResults := ResultSet{ - "hostNetworkSet": {ID: "hostNetworkSet", Message: "Host network should not be configured", Success: false, Severity: "warning", Category: "Networking"}, + "hostNetworkSet": {ID: "hostNetworkSet", Message: "Host network should not be configured", Success: false, Severity: "warning", Category: "Security"}, "hostIPCSet": {ID: "hostIPCSet", Message: "Host IPC is not configured", Success: true, Severity: "danger", Category: "Security"}, "hostPIDSet": {ID: "hostPIDSet", Message: "Host PID is not configured", Success: true, Severity: "danger", Category: "Security"}, } @@ -163,7 +163,7 @@ func TestInvalidPIDPod(t *testing.T) { expectedResults := ResultSet{ "hostPIDSet": {ID: "hostPIDSet", Message: "Host PID should not be configured", Success: false, Severity: "danger", Category: "Security"}, "hostIPCSet": {ID: "hostIPCSet", Message: "Host IPC is not configured", Success: true, Severity: "danger", Category: "Security"}, - "hostNetworkSet": {ID: "hostNetworkSet", Message: "Host network is not configured", Success: true, Severity: "warning", Category: "Networking"}, + "hostNetworkSet": {ID: "hostNetworkSet", Message: "Host network is not configured", Success: true, Severity: "warning", Category: "Security"}, } actualPodResult, err := ValidatePod(context.Background(), &c, workload) @@ -207,7 +207,7 @@ func TestExemption(t *testing.T) { Dangers: uint(0), } expectedResults := ResultSet{ - "hostNetworkSet": {ID: "hostNetworkSet", Message: "Host network is not configured", Success: true, Severity: "warning", Category: "Networking"}, + "hostNetworkSet": {ID: "hostNetworkSet", Message: "Host network is not configured", Success: true, Severity: "warning", Category: "Security"}, "hostPIDSet": {ID: "hostPIDSet", Message: "Host PID is not configured", Success: true, Severity: "danger", Category: "Security"}, } diff --git a/pkg/validator/schema_test.go b/pkg/validator/schema_test.go index 490161f89..303357fd9 100644 --- a/pkg/validator/schema_test.go +++ b/pkg/validator/schema_test.go @@ -42,7 +42,7 @@ customChecks: - initContainer successMessage: Memory limits are within the required range failureMessage: Memory limits should be within the required range - category: Resources + category: Efficiency target: Container schema: '$schema': http://json-schema.org/draft-07/schema @@ -67,7 +67,7 @@ customChecks: memoryRequestsRange: successMessage: Memory requests are within the required range failureMessage: Memory requests should be within the required range - category: Resources + category: Efficiency target: Container containers: 
exclude: @@ -118,7 +118,7 @@ func TestValidateResourcesPartiallyValid(t *testing.T) { Success: false, Severity: "warning", Message: "Memory limits should be within the required range", - Category: "Resources", + Category: "Efficiency", }, } @@ -128,7 +128,7 @@ func TestValidateResourcesPartiallyValid(t *testing.T) { Success: false, Severity: "danger", Message: "Memory requests should be within the required range", - Category: "Resources", + Category: "Efficiency", }, } @@ -192,14 +192,14 @@ func TestValidateResourcesFullyValid(t *testing.T) { Success: true, Severity: "danger", Message: "Memory requests are within the required range", - Category: "Resources", + Category: "Efficiency", }, { ID: "memoryLimitsRange", Success: true, Severity: "warning", Message: "Memory limits are within the required range", - Category: "Resources", + Category: "Efficiency", }, } @@ -211,28 +211,28 @@ func TestValidateResourcesFullyValid(t *testing.T) { Success: true, Severity: "warning", Message: "CPU requests are set", - Category: "Resources", + Category: "Efficiency", }, { ID: "memoryRequestsMissing", Success: true, Severity: "warning", Message: "Memory requests are set", - Category: "Resources", + Category: "Efficiency", }, { ID: "cpuLimitsMissing", Success: true, Severity: "danger", Message: "CPU limits are set", - Category: "Resources", + Category: "Efficiency", }, { ID: "memoryLimitsMissing", Success: true, Severity: "danger", Message: "Memory limits are set", - Category: "Resources", + Category: "Efficiency", }, }
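
For readers of the recategorized docs above: the Reliability and Efficiency checks all inspect ordinary pod-spec fields. The sketch below is illustrative only (the names, image, port, and resource values are invented and not part of this patch); it shows a Deployment that would satisfy the probe, image, replica, and resource checks under their new categories.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-app                      # illustrative name
spec:
  replicas: 2                            # more than one replica (multipleReplicasForDeployment)
  selector:
    matchLabels:
      app: example-app
  template:
    metadata:
      labels:
        app: example-app
    spec:
      containers:
      - name: app
        image: example.registry/app:v1.2.3   # pinned tag (tagNotSpecified)
        imagePullPolicy: Always              # pullPolicyNotAlways
        resources:                           # requests and limits (Efficiency checks)
          requests:
            cpu: 100m
            memory: 128Mi
          limits:
            cpu: 250m
            memory: 256Mi
        readinessProbe:                      # readinessProbeMissing
          httpGet:
            path: /healthz
            port: 8080
        livenessProbe:                       # livenessProbeMissing
          httpGet:
            path: /healthz
            port: 8080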
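
The standardized category names also apply to custom checks defined in the Polaris configuration, as exercised by pkg/validator/schema_test.go above. A minimal sketch follows, assuming a hypothetical memoryLimitsRange check; the severity value and the deliberately simplified schema are illustrative, not taken from this patch.

checks:
  # a custom check is enabled with a severity, alongside the built-in checks
  memoryLimitsRange: warning
customChecks:
  memoryLimitsRange:
    successMessage: Memory limits are within the required range
    failureMessage: Memory limits should be within the required range
    category: Efficiency                 # one of Security, Reliability, Efficiency
    target: Container
    schema:
      '$schema': http://json-schema.org/draft-07/schema
      type: object
      required:
      - resources
      properties:
        resources:
          type: object
          required:
          - limits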