fix(controller): fix pod status calculation for pods with sidecar containers
Signed-off-by: linghaoSu <linghao.su@daocloud.io>
linghaoSu committed Oct 8, 2024
1 parent 3f249ff commit 5cbbc7d
Showing 3 changed files with 195 additions and 9 deletions.
51 changes: 47 additions & 4 deletions controller/cache/info.go
@@ -278,6 +278,34 @@ func populateIstioVirtualServiceInfo(un *unstructured.Unstructured, res *Resourc
res.NetworkingInfo = &v1alpha1.ResourceNetworkingInfo{TargetRefs: targets, ExternalURLs: urls}
}


func isPodInitializedConditionTrue(status *v1.PodStatus) bool {
for _, condition := range status.Conditions {
if condition.Type != v1.PodInitialized {
continue
}

return condition.Status == v1.ConditionTrue
}
return false
}

func isRestartableInitContainer(initContainer *v1.Container) bool {
if initContainer == nil {
return false
}
if initContainer.RestartPolicy == nil {
return false
}

return *initContainer.RestartPolicy == v1.ContainerRestartPolicyAlways
}

// IsPodPhaseTerminal returns true if the pod's phase is terminal.
func IsPodPhaseTerminal(phase v1.PodPhase) bool {
return phase == v1.PodFailed || phase == v1.PodSucceeded
}

func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
pod := v1.Pod{}
err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &pod)
@@ -288,7 +316,8 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
totalContainers := len(pod.Spec.Containers)
readyContainers := 0

reason := string(pod.Status.Phase)
podPhase := pod.Status.Phase
reason := string(podPhase)
if pod.Status.Reason != "" {
reason = pod.Status.Reason
}
@@ -306,13 +335,28 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
res.Images = append(res.Images, image)
}

// If the Pod carries {type:PodScheduled, reason:SchedulingGated}, set reason to 'SchedulingGated'.
for _, condition := range pod.Status.Conditions {
if condition.Type == v1.PodScheduled && condition.Reason == v1.PodReasonSchedulingGated {
reason = v1.PodReasonSchedulingGated
}
}

initContainers := make(map[string]*v1.Container)
for i := range pod.Spec.InitContainers {
initContainers[pod.Spec.InitContainers[i].Name] = &pod.Spec.InitContainers[i]
}

initializing := false
for i := range pod.Status.InitContainerStatuses {
container := pod.Status.InitContainerStatuses[i]
restarts += int(container.RestartCount)
switch {
case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
continue
case isRestartableInitContainer(initContainers[container.Name]) &&
container.Started != nil && *container.Started:
continue
case container.State.Terminated != nil:
// initialization failed
if len(container.State.Terminated.Reason) == 0 {
@@ -334,8 +378,7 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
}
break
}
if !initializing {
restarts = 0
if !initializing || isPodInitializedConditionTrue(&pod.Status) {
hasRunning := false
for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
container := pod.Status.ContainerStatuses[i]
@@ -370,7 +413,7 @@ func populatePodInfo(un *unstructured.Unstructured, res *ResourceInfo) {
// and https://github.com/kubernetes/kubernetes/issues/90358#issuecomment-617859364
if pod.DeletionTimestamp != nil && pod.Status.Reason == "NodeLost" {
reason = "Unknown"
} else if pod.DeletionTimestamp != nil {
} else if pod.DeletionTimestamp != nil && !IsPodPhaseTerminal(podPhase) {
reason = "Terminating"
}

111 changes: 111 additions & 0 deletions controller/cache/info_test.go
@@ -285,6 +285,117 @@ func TestGetPodInfo(t *testing.T) {
assert.Equal(t, &v1alpha1.ResourceNetworkingInfo{Labels: map[string]string{"app": "guestbook"}}, info.NetworkingInfo)
}


func TestGetPodWithInitialContainerInfo(t *testing.T) {
pod := strToUnstructured(`
apiVersion: "v1"
kind: "Pod"
metadata:
labels:
app: "app-with-initial-container"
name: "app-with-initial-container-5f46976fdb-vd6rv"
namespace: "default"
ownerReferences:
- apiVersion: "apps/v1"
kind: "ReplicaSet"
name: "app-with-initial-container-5f46976fdb"
spec:
containers:
- image: "alpine:latest"
imagePullPolicy: "Always"
name: "app-with-initial-container"
initContainers:
- image: "alpine:latest"
imagePullPolicy: "Always"
name: "app-with-initial-container-logshipper"
nodeName: "minikube"
status:
containerStatuses:
- image: "alpine:latest"
name: "app-with-initial-container"
ready: true
restartCount: 0
started: true
state:
running:
startedAt: "2024-10-08T08:44:25Z"
initContainerStatuses:
- image: "alpine:latest"
name: "app-with-initial-container-logshipper"
ready: true
restartCount: 0
started: false
state:
terminated:
exitCode: 0
reason: "Completed"
phase: "Running"
`)

info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Running"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "1/1"},
}, info.Info)
}

func TestGetPodInfoWithSidecar(t *testing.T) {
pod := strToUnstructured(`
apiVersion: v1
kind: Pod
metadata:
labels:
app: app-with-sidecar
name: app-with-sidecar-6664cc788c-lqlrp
namespace: default
ownerReferences:
- apiVersion: apps/v1
kind: ReplicaSet
name: app-with-sidecar-6664cc788c
spec:
containers:
- image: 'docker.m.daocloud.io/library/alpine:latest'
imagePullPolicy: Always
name: app-with-sidecar
initContainers:
- image: 'docker.m.daocloud.io/library/alpine:latest'
imagePullPolicy: Always
name: logshipper
restartPolicy: Always
nodeName: minikube
status:
containerStatuses:
- image: 'docker.m.daocloud.io/library/alpine:latest'
name: app-with-sidecar
ready: true
restartCount: 0
started: true
state:
running:
startedAt: '2024-10-08T08:39:43Z'
initContainerStatuses:
- image: 'docker.m.daocloud.io/library/alpine:latest'
name: logshipper
ready: true
restartCount: 0
started: true
state:
running:
startedAt: '2024-10-08T08:39:40Z'
phase: Running
`)

info := &ResourceInfo{}
populateNodeInfo(pod, info, []string{})
assert.Equal(t, []v1alpha1.InfoItem{
{Name: "Status Reason", Value: "Running"},
{Name: "Node", Value: "minikube"},
{Name: "Containers", Value: "1/1"},
}, info.Info)
}

func TestGetNodeInfo(t *testing.T) {
node := strToUnstructured(`
apiVersion: v1
42 changes: 37 additions & 5 deletions ui/src/app/applications/components/utils.tsx
@@ -987,23 +987,55 @@ export const OperationState = ({app, quiet}: {app: appModels.Application; quiet?
);
};

function isPodInitializedConditionTrue(status: any): boolean {
for (const condition of status.conditions) {
if (condition.type !== 'Initialized') {
continue;
}
return condition.status === 'True';
}

return false;
}

// isPodPhaseTerminal returns true if the pod's phase is terminal.
function isPodPhaseTerminal(phase: appModels.PodPhase): boolean {
return phase === appModels.PodPhase.PodFailed || phase === appModels.PodPhase.PodSucceeded;
}

export function getPodStateReason(pod: appModels.State): {message: string; reason: string; netContainerStatuses: any[]} {
let reason = pod.status.phase;
const podPhase = pod.status.phase;
let reason = podPhase;
let message = '';
if (pod.status.reason) {
reason = pod.status.reason;
}

let initializing = false;

let netContainerStatuses = pod.status.initContainerStatuses || [];
netContainerStatuses = netContainerStatuses.concat(pod.status.containerStatuses || []);

for (const condition of pod.status.conditions || []) {
if (condition.type === 'PodScheduled' && condition.reason === 'SchedulingGated') {
reason = 'SchedulingGated';
}
}

const initContainers: Record<string, any> = {};

for (const container of pod.spec.initContainers ?? []) {
initContainers[container.name] = container;
}

let initializing = false;
for (const container of (pod.status.initContainerStatuses || []).slice().reverse()) {
if (container.state.terminated && container.state.terminated.exitCode === 0) {
continue;
}

if (container.started && initContainers[container.name].restartPolicy === 'Always') {
continue;
}

if (container.state.terminated) {
if (container.state.terminated.reason) {
reason = `Init:ExitCode:${container.state.terminated.exitCode}`;
@@ -1021,7 +1053,7 @@ export function getPodStateReason(pod: appModels.State): {message: string; reaso
break;
}

if (!initializing) {
if (!initializing || isPodInitializedConditionTrue(pod.status)) {
let hasRunning = false;
for (const container of pod.status.containerStatuses || []) {
if (container.state.waiting && container.state.waiting.reason) {
@@ -1053,7 +1085,7 @@ export function getPodStateReason(pod: appModels.State): {message: string; reaso
if ((pod as any).metadata.deletionTimestamp && pod.status.reason === 'NodeLost') {
reason = 'Unknown';
message = '';
} else if ((pod as any).metadata.deletionTimestamp) {
} else if ((pod as any).metadata.deletionTimestamp && !isPodPhaseTerminal(podPhase)) {
reason = 'Terminating';
message = '';
}
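The other behavioral change, mirrored in both controller/cache/info.go and utils.tsx, is that a pod already in a terminal phase (Succeeded or Failed) keeps that phase as its status reason while it is being deleted, instead of being reported as Terminating. The standalone sketch below is illustrative only; isPodPhaseTerminal matches the helper added in this commit, while reasonFor is a hypothetical helper that isolates just the deletion handling applied inside populatePodInfo and getPodStateReason:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// isPodPhaseTerminal matches the helper added in this commit.
func isPodPhaseTerminal(phase v1.PodPhase) bool {
	return phase == v1.PodFailed || phase == v1.PodSucceeded
}

// reasonFor shows only the deletion handling: a non-terminal pod with a
// deletionTimestamp is reported as "Terminating"; a terminal pod keeps its phase.
func reasonFor(phase v1.PodPhase, beingDeleted bool) string {
	if beingDeleted && !isPodPhaseTerminal(phase) {
		return "Terminating"
	}
	return string(phase)
}

func main() {
	fmt.Println(reasonFor(v1.PodSucceeded, true)) // "Succeeded" (previously shown as "Terminating")
	fmt.Println(reasonFor(v1.PodRunning, true))   // "Terminating"
}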
