Skip to content

Commit

Permalink
Add EvictOptions struct to EvictPod()
Browse files Browse the repository at this point in the history
  • Loading branch information
damemi committed Jul 11, 2022
1 parent 6e69a10 commit 1a1ebbc
Show file tree
Hide file tree
Showing 11 changed files with 42 additions and 30 deletions.
2 changes: 1 addition & 1 deletion pkg/descheduler/descheduler.go
Original file line number Diff line number Diff line change
Expand Up @@ -302,7 +302,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
continue
}
evictorFilter := evictions.NewEvictorFilter(nodes, getPodsAssignedToNode, evictLocalStoragePods, evictSystemCriticalPods, ignorePvcPods, evictBarePods, evictions.WithNodeFit(nodeFit), evictions.WithPriorityThreshold(thresholdPriority))
f(context.WithValue(ctx, "strategyName", string(name)), rs.Client, strategy, nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
f(ctx, rs.Client, strategy, nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
}
} else {
klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
Expand Down
35 changes: 16 additions & 19 deletions pkg/descheduler/evictions/evictions.go
Original file line number Diff line number Diff line change
Expand Up @@ -110,23 +110,20 @@ func (pe *PodEvictor) NodeLimitExceeded(node *v1.Node) bool {
return false
}

// EvictOptions provides a handle for passing additional info to EvictPod.
type EvictOptions struct {
// Strategy is the name of the descheduler strategy requesting the eviction;
// it is recorded in the "strategy" label of eviction metrics and in log output.
Strategy string
// Reason is a human-readable explanation for the eviction; it is included in
// log output and appended to the "Descheduled" event emitted for the pod.
Reason string
}

// EvictPod evicts a pod while exercising eviction limits.
// Returns true when the pod is evicted on the server side.
// Eviction reason can be set through the ctx's evictionReason:STRING pair
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod) bool {
strategy := ""
if ctx.Value("strategyName") != nil {
strategy = ctx.Value("strategyName").(string)
}
reason := ""
if ctx.Value("evictionReason") != nil {
reason = ctx.Value("evictionReason").(string)
}

// Eviction reason can be set through the EvictOptions struct
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) bool {
if pod.Spec.NodeName != "" {
if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[pod.Spec.NodeName]+1 > *pe.maxPodsToEvictPerNode {
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": opts.Strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
}
klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per node reached"), "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
return false
Expand All @@ -135,7 +132,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod) bool {

if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": opts.Strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
}
klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per namespace reached"), "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
return false
Expand All @@ -144,9 +141,9 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod) bool {
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)
if err != nil {
// err is used only for logging purposes
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", reason)
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", opts.Reason)
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": opts.Strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
}
return false
}
Expand All @@ -157,18 +154,18 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod) bool {
pe.namespacePodCount[pod.Namespace]++

if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": opts.Strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
}

if pe.dryRun {
klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", reason, "strategy", strategy, "node", pod.Spec.NodeName)
klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", opts.Strategy, "node", pod.Spec.NodeName)
} else {
klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", reason, "strategy", strategy, "node", pod.Spec.NodeName)
klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", opts.Strategy, "node", pod.Spec.NodeName)
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartStructuredLogging(3)
eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)})
r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
r.Event(pod, v1.EventTypeNormal, "Descheduled", fmt.Sprintf("pod evicted by sigs.k8s.io/descheduler%s", reason))
r.Event(pod, v1.EventTypeNormal, "Descheduled", fmt.Sprintf("pod evicted by sigs.k8s.io/descheduler%s", opts.Reason))
}
return true
}
Expand Down
4 changes: 3 additions & 1 deletion pkg/descheduler/strategies/duplicates.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,8 @@ import (
"sigs.k8s.io/descheduler/pkg/utils"
)

const RemoveDuplicatesStrategyName = "RemoveDuplicates"

func validateRemoveDuplicatePodsParams(params *api.StrategyParameters) error {
if params == nil {
return nil
Expand Down Expand Up @@ -200,7 +202,7 @@ func RemoveDuplicatePods(
// It's assumed all duplicated pods are in the same priority class
// TODO(jchaloup): check if the pod has a different node to lend to
for _, pod := range pods[upperAvg-1:] {
podEvictor.EvictPod(ctx, pod)
podEvictor.EvictPod(ctx, pod, evictions.EvictOptions{Strategy: RemoveDuplicatesStrategyName})
if podEvictor.NodeLimitExceeded(nodeMap[nodeName]) {
continue loop
}
Expand Down
4 changes: 3 additions & 1 deletion pkg/descheduler/strategies/failedpods.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@ import (
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/validation"
)

const FailedPodsStrategyName = "FailedPods"

// validatedFailedPodsStrategyParams contains validated strategy parameters
type validatedFailedPodsStrategyParams struct {
validation.ValidatedStrategyParams
Expand Down Expand Up @@ -75,7 +77,7 @@ func RemoveFailedPods(
continue
}

podEvictor.EvictPod(ctx, pods[i])
podEvictor.EvictPod(ctx, pods[i], evictions.EvictOptions{Strategy: FailedPodsStrategyName})
if podEvictor.NodeLimitExceeded(node) {
continue
}
Expand Down
5 changes: 3 additions & 2 deletions pkg/descheduler/strategies/node_affinity.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,13 +24,14 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"

"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

const NodeAffinityStrategyName = "NodeAffinity"

func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) error {
if params == nil || len(params.NodeAffinityType) == 0 {
return fmt.Errorf("NodeAffinityType is empty")
Expand Down Expand Up @@ -94,7 +95,7 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
for _, pod := range pods {
if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
podEvictor.EvictPod(ctx, pod)
podEvictor.EvictPod(ctx, pod, evictions.EvictOptions{Strategy: NodeAffinityStrategyName})
if podEvictor.NodeLimitExceeded(node) {
continue loop
}
Expand Down
4 changes: 3 additions & 1 deletion pkg/descheduler/strategies/node_taint.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,8 @@ import (
"k8s.io/klog/v2"
)

const NodeTaintStrategyName = "NodeTaint"

func validateRemovePodsViolatingNodeTaintsParams(params *api.StrategyParameters) error {
if params == nil {
return nil
Expand Down Expand Up @@ -107,7 +109,7 @@ loop:
taintFilterFnc,
) {
klog.V(2).InfoS("Not all taints with NoSchedule effect are tolerated after update for pod on node", "pod", klog.KObj(pods[i]), "node", klog.KObj(node))
podEvictor.EvictPod(ctx, pods[i])
podEvictor.EvictPod(ctx, pods[i], evictions.EvictOptions{Strategy: NodeTaintStrategyName})
if podEvictor.NodeLimitExceeded(node) {
continue loop
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -313,7 +313,7 @@ func evictPods(
continue
}

if podEvictor.EvictPod(ctx, pod) {
if podEvictor.EvictPod(ctx, pod, evictions.EvictOptions{Strategy: strategy}) {
klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))

for name := range totalAvailableUsage {
Expand Down
4 changes: 3 additions & 1 deletion pkg/descheduler/strategies/pod_antiaffinity.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,8 @@ import (
"k8s.io/klog/v2"
)

const PodAntiAffinityStrategyName = "InterPodAntiAffinity"

func validateRemovePodsViolatingInterPodAntiAffinityParams(params *api.StrategyParameters) error {
if params == nil {
return nil
Expand Down Expand Up @@ -87,7 +89,7 @@ loop:
totalPods := len(pods)
for i := 0; i < totalPods; i++ {
if checkPodsWithAntiAffinityExist(pods[i], pods) && evictorFilter.Filter(pods[i]) {
if podEvictor.EvictPod(ctx, pods[i]) {
if podEvictor.EvictPod(ctx, pods[i], evictions.EvictOptions{Strategy: PodAntiAffinityStrategyName}) {
// Since the current pod is evicted all other pods which have anti-affinity with this
// pod need not be evicted.
// Update pods.
Expand Down
4 changes: 3 additions & 1 deletion pkg/descheduler/strategies/pod_lifetime.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,8 @@ import (
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

const PodLifetimeStrategyName = "PodLifeTime"

var (
podLifeTimeAllowedStates = sets.NewString(
string(v1.PodRunning),
Expand Down Expand Up @@ -131,7 +133,7 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
if nodeLimitExceeded[pod.Spec.NodeName] {
continue
}
if podEvictor.EvictPod(ctx, pod) {
if podEvictor.EvictPod(ctx, pod, evictions.EvictOptions{Strategy: PodLifetimeStrategyName}) {
klog.V(1).InfoS("Evicted pod because it exceeded its lifetime", "pod", klog.KObj(pod), "maxPodLifeTime", *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds)
}
if podEvictor.NodeLimitExceeded(nodeMap[pod.Spec.NodeName]) {
Expand Down
4 changes: 3 additions & 1 deletion pkg/descheduler/strategies/toomanyrestarts.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,8 @@ import (
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

const TooManyRestartsStrategyName = "TooManyRestarts"

func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameters) error {
if params == nil || params.PodsHavingTooManyRestarts == nil || params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
return fmt.Errorf("PodsHavingTooManyRestarts threshold not set")
Expand Down Expand Up @@ -90,7 +92,7 @@ loop:
} else if restarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold {
continue
}
podEvictor.EvictPod(ctx, pods[i])
podEvictor.EvictPod(ctx, pods[i], evictions.EvictOptions{Strategy: TooManyRestartsStrategyName})
if podEvictor.NodeLimitExceeded(node) {
continue loop
}
Expand Down
4 changes: 3 additions & 1 deletion pkg/descheduler/strategies/topologyspreadconstraint.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,8 @@ import (
"sigs.k8s.io/descheduler/pkg/utils"
)

const PodTopologySpreadStrategyName = "PodTopologySpread"

// AntiAffinityTerm's topology key value used in predicate metadata
type topologyPair struct {
key string
Expand Down Expand Up @@ -191,7 +193,7 @@ func RemovePodsViolatingTopologySpreadConstraint(
if !isEvictable(pod) {
continue
}
podEvictor.EvictPod(ctx, pod)
podEvictor.EvictPod(ctx, pod, evictions.EvictOptions{Strategy: PodTopologySpreadStrategyName})
if podEvictor.NodeLimitExceeded(nodeMap[pod.Spec.NodeName]) {
nodeLimitExceeded[pod.Spec.NodeName] = true
}
Expand Down

0 comments on commit 1a1ebbc

Please sign in to comment.