diff --git a/agent/api/container/container.go b/agent/api/container/container.go index c77dab614e7..493fcb97fe5 100644 --- a/agent/api/container/container.go +++ b/agent/api/container/container.go @@ -23,6 +23,7 @@ import ( "time" resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" + referenceutil "github.com/aws/amazon-ecs-agent/agent/utils/reference" apicontainerstatus "github.com/aws/amazon-ecs-agent/ecs-agent/api/container/status" apierrors "github.com/aws/amazon-ecs-agent/ecs-agent/api/errors" "github.com/aws/amazon-ecs-agent/ecs-agent/credentials" @@ -1521,3 +1522,17 @@ func (c *Container) GetImageName() string { containerImage := strings.Split(c.Image, ":")[0] return containerImage } + +// Checks if the container has a resolved image manifest digest. +// Always returns false for internal containers as those are out-of-scope of digest resolution. +func (c *Container) DigestResolved() bool { + return !c.IsInternal() && c.GetImageDigest() != "" +} + +// Checks if the container's image requires manifest digest resolution. +// Manifest digest resolution is required if the container's image reference does not +// have a digest. +// Always returns false for internal containers as those are out-of-scope of digest resolution. +func (c *Container) DigestResolutionRequired() bool { + return !c.IsInternal() && referenceutil.GetDigestFromImageRef(c.Image) == "" +} diff --git a/agent/api/container/container_test.go b/agent/api/container/container_test.go index 8ee4f036cbc..5252bfefdba 100644 --- a/agent/api/container/container_test.go +++ b/agent/api/container/container_test.go @@ -1342,3 +1342,28 @@ func getContainer(hostConfig string, credentialSpecs []string) *Container { c.CredentialSpecs = credentialSpecs return c } + +func TestDigestResolved(t *testing.T) { + t.Run("never resolved for internal container", func(t *testing.T) { + assert.False(t, (&Container{Type: ContainerServiceConnectRelay}).DigestResolved()) + }) + t.Run("digest resolved if it is populated", func(t *testing.T) { + assert.True(t, (&Container{ImageDigest: "digest"}).DigestResolved()) + }) + t.Run("digest not resolved if it is not populated", func(t *testing.T) { + assert.False(t, (&Container{}).DigestResolved()) + }) +} + +func TestDigestResolutionRequired(t *testing.T) { + t.Run("never required for internal containers", func(t *testing.T) { + assert.False(t, (&Container{Type: ContainerServiceConnectRelay}).DigestResolutionRequired()) + }) + t.Run("required if not found in image reference", func(t *testing.T) { + assert.True(t, (&Container{Image: "alpine"}).DigestResolutionRequired()) + }) + t.Run("not required if found in image reference", func(t *testing.T) { + image := "ubuntu@sha256:ed6d2c43c8fbcd3eaa44c9dab6d94cb346234476230dc1681227aa72d07181ee" + assert.False(t, (&Container{Image: image}).DigestResolutionRequired()) + }) +} diff --git a/agent/api/statechange.go b/agent/api/statechange.go index 7df2b984082..c937541ba12 100644 --- a/agent/api/statechange.go +++ b/agent/api/statechange.go @@ -130,7 +130,7 @@ func NewTaskStateChangeEvent(task *apitask.Task, reason string) (TaskStateChange return event, ErrShouldNotSendEvent{task.Arn} } taskKnownStatus := task.GetKnownStatus() - if !taskKnownStatus.BackendRecognized() { + if taskKnownStatus != apitaskstatus.TaskManifestPulled && !taskKnownStatus.BackendRecognized() { return event, errors.Errorf( "create task state change event api: status not recognized by ECS: %v", taskKnownStatus) @@ -140,6 +140,14 @@ func NewTaskStateChangeEvent(task 
*apitask.Task, reason string) (TaskStateChange "create task state change event api: status [%s] already sent", taskKnownStatus.String()) } + if taskKnownStatus == apitaskstatus.TaskManifestPulled && !task.HasAContainerWithResolvedDigest() { + return event, ErrShouldNotSendEvent{ + fmt.Sprintf( + "create task state change event api: status %s not eligible for backend reporting as"+ + " no digests were resolved", + apitaskstatus.TaskManifestPulled.String()), + } + } event = TaskStateChange{ TaskARN: task.Arn, @@ -161,11 +169,21 @@ func NewContainerStateChangeEvent(task *apitask.Task, cont *apicontainer.Contain return event, err } contKnownStatus := cont.GetKnownStatus() - if !contKnownStatus.ShouldReportToBackend(cont.GetSteadyStateStatus()) { + if contKnownStatus != apicontainerstatus.ContainerManifestPulled && + !contKnownStatus.ShouldReportToBackend(cont.GetSteadyStateStatus()) { return event, ErrShouldNotSendEvent{fmt.Sprintf( "create container state change event api: status not recognized by ECS: %v", contKnownStatus)} } + if contKnownStatus == apicontainerstatus.ContainerManifestPulled && !cont.DigestResolved() { + // Transition to MANIFEST_PULLED state is sent to the backend only to report a resolved + // image manifest digest. No need to generate an event if the digest was not resolved + // which could happen due to various reasons. + return event, ErrShouldNotSendEvent{fmt.Sprintf( + "create container state change event api:"+ + " no need to send %s event as no resolved digests were found", + apicontainerstatus.ContainerManifestPulled.String())} + } if cont.GetSentStatus() >= contKnownStatus { return event, ErrShouldNotSendEvent{fmt.Sprintf( "create container state change event api: status [%s] already sent for container %s, task %s", @@ -196,7 +214,7 @@ func newUncheckedContainerStateChangeEvent(task *apitask.Task, cont *apicontaine TaskArn: task.Arn, ContainerName: cont.Name, RuntimeID: cont.GetRuntimeID(), - Status: contKnownStatus.BackendStatus(cont.GetSteadyStateStatus()), + Status: containerStatusChangeStatus(contKnownStatus, cont.GetSteadyStateStatus()), ExitCode: cont.GetKnownExitCode(), PortBindings: portBindings, ImageDigest: cont.GetImageDigest(), @@ -206,6 +224,27 @@ func newUncheckedContainerStateChangeEvent(task *apitask.Task, cont *apicontaine return event, nil } +// Maps container known status to a suitable status for ContainerStateChange. +// +// Returns ContainerRunning if known status matches steady state status, +// returns knownStatus if it is ContainerManifestPulled or ContainerStopped, +// returns ContainerStatusNone for all other cases. +func containerStatusChangeStatus( + knownStatus apicontainerstatus.ContainerStatus, + steadyStateStatus apicontainerstatus.ContainerStatus, +) apicontainerstatus.ContainerStatus { + switch knownStatus { + case steadyStateStatus: + return apicontainerstatus.ContainerRunning + case apicontainerstatus.ContainerManifestPulled: + return apicontainerstatus.ContainerManifestPulled + case apicontainerstatus.ContainerStopped: + return apicontainerstatus.ContainerStopped + default: + return apicontainerstatus.ContainerStatusNone + } +} + // NewManagedAgentChangeEvent creates a new managedAgent change event to convey managed agent state changes // returns error if the state change doesn't need to be sent to the ECS backend. 
func NewManagedAgentChangeEvent(task *apitask.Task, cont *apicontainer.Container, managedAgentName string, reason string) (ManagedAgentStateChange, error) { @@ -322,24 +361,6 @@ func (change *TaskStateChange) SetTaskTimestamps() { } } -// ShouldBeReported checks if the statechange should be reported to backend -func (change *TaskStateChange) ShouldBeReported() bool { - // Events that should be reported: - // 1. Normal task state change: RUNNING/STOPPED - // 2. Container state change, with task status in CREATED/RUNNING/STOPPED - // The task timestamp will be sent in both of the event type - // TODO Move the Attachment statechange check into this method - if change.Status == apitaskstatus.TaskRunning || change.Status == apitaskstatus.TaskStopped { - return true - } - - if len(change.Containers) != 0 { - return true - } - - return false -} - func (change *TaskStateChange) ToFields() logger.Fields { fields := logger.Fields{ "eventType": "TaskStateChange", @@ -494,8 +515,11 @@ func buildContainerStateChangePayload(change ContainerStateChange) (*ecsmodel.Co statechange.ImageDigest = aws.String(change.ImageDigest) } - stat := change.Status.String() - if stat != apicontainerstatus.ContainerStopped.String() && stat != apicontainerstatus.ContainerRunning.String() { + // TODO: This check already exists in NewContainerStateChangeEvent and shouldn't be repeated here; remove after verifying + stat := change.Status + if stat != apicontainerstatus.ContainerManifestPulled && + stat != apicontainerstatus.ContainerStopped && + stat != apicontainerstatus.ContainerRunning { logger.Warn("Not submitting unsupported upstream container state", logger.Fields{ field.Status: stat, field.ContainerName: change.ContainerName, @@ -503,10 +527,11 @@ func buildContainerStateChangePayload(change ContainerStateChange) (*ecsmodel.Co }) return nil, nil } - if stat == "DEAD" { - stat = apicontainerstatus.ContainerStopped.String() + // TODO: This check is probably redundant as String() method never returns "DEAD"; remove after verifying + if stat.String() == "DEAD" { + stat = apicontainerstatus.ContainerStopped } - statechange.Status = aws.String(stat) + statechange.Status = aws.String(stat.BackendStatusString()) if change.ExitCode != nil { exitCode := int64(aws.IntValue(change.ExitCode)) diff --git a/agent/api/statechange_test.go b/agent/api/statechange_test.go index 11fe5f3b506..ab60047158f 100644 --- a/agent/api/statechange_test.go +++ b/agent/api/statechange_test.go @@ -27,51 +27,13 @@ import ( "github.com/aws/amazon-ecs-agent/agent/engine/execcmd" apicontainerstatus "github.com/aws/amazon-ecs-agent/ecs-agent/api/container/status" "github.com/aws/amazon-ecs-agent/ecs-agent/api/ecs/model/ecs" + ecsmodel "github.com/aws/amazon-ecs-agent/ecs-agent/api/ecs/model/ecs" apitaskstatus "github.com/aws/amazon-ecs-agent/ecs-agent/api/task/status" "github.com/aws/aws-sdk-go/aws" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestShouldBeReported(t *testing.T) { - cases := []struct { - status apitaskstatus.TaskStatus - containerChange []ContainerStateChange - result bool - }{ - { // Normal task state change to running - status: apitaskstatus.TaskRunning, - result: true, - }, - { // Normal task state change to stopped - status: apitaskstatus.TaskStopped, - result: true, - }, - { // Container changed while task is not in steady state - status: apitaskstatus.TaskCreated, - containerChange: []ContainerStateChange{ - {TaskArn: "taskarn"}, - }, - result: true, - }, - { // No container change and task 
status not recognized - status: apitaskstatus.TaskCreated, - result: false, - }, - } - - for _, tc := range cases { - t.Run(fmt.Sprintf("task change status: %s, container change: %t", tc.status, len(tc.containerChange) > 0), - func(t *testing.T) { - taskChange := TaskStateChange{ - Status: tc.status, - Containers: tc.containerChange, - } - - assert.Equal(t, tc.result, taskChange.ShouldBeReported()) - }) - } -} - func TestSetTaskTimestamps(t *testing.T) { t1 := time.Now() t2 := t1.Add(time.Second) @@ -495,3 +457,374 @@ func getTestContainerStateChange() ContainerStateChange { return testContainerStateChange } + +func TestNewTaskStateChangeEvent(t *testing.T) { + tcs := []struct { + name string + task *apitask.Task + reason string + expected TaskStateChange + expectedError string + }{ + { + name: "internal tasks are never reported", + task: &apitask.Task{IsInternal: true, Arn: "arn"}, + expectedError: "should not send events for internal tasks or containers: arn", + }, + { + name: "manifest_pulled state is not reported if there are no resolved digests", + task: &apitask.Task{ + Arn: "arn", + KnownStatusUnsafe: apitaskstatus.TaskManifestPulled, + Containers: []*apicontainer.Container{ + {ImageDigest: ""}, + {ImageDigest: "digest", Type: apicontainer.ContainerCNIPause}, + }, + }, + expectedError: "should not send events for internal tasks or containers:" + + " create task state change event api: status MANIFEST_PULLED not eligible" + + " for backend reporting as no digests were resolved", + }, + { + name: "manifest_pulled state is reported", + task: &apitask.Task{ + Arn: "arn", + KnownStatusUnsafe: apitaskstatus.TaskManifestPulled, + Containers: []*apicontainer.Container{{ImageDigest: "digest"}}, + }, + expected: TaskStateChange{TaskARN: "arn", Status: apitaskstatus.TaskManifestPulled}, + }, + { + name: "created state is not reported", + task: &apitask.Task{Arn: "arn", KnownStatusUnsafe: apitaskstatus.TaskCreated}, + expectedError: "create task state change event api: status not recognized by ECS: CREATED", + }, + { + name: "running state is reported", + task: &apitask.Task{Arn: "arn", KnownStatusUnsafe: apitaskstatus.TaskRunning}, + expected: TaskStateChange{TaskARN: "arn", Status: apitaskstatus.TaskRunning}, + }, + { + name: "stopped state is reported", + task: &apitask.Task{Arn: "arn", KnownStatusUnsafe: apitaskstatus.TaskRunning}, + reason: "container stopped", + expected: TaskStateChange{ + TaskARN: "arn", Status: apitaskstatus.TaskRunning, Reason: "container stopped", + }, + }, + { + name: "already sent status is not reported again", + task: &apitask.Task{ + Arn: "arn", + KnownStatusUnsafe: apitaskstatus.TaskManifestPulled, + SentStatusUnsafe: apitaskstatus.TaskManifestPulled, + }, + expectedError: "create task state change event api: status [MANIFEST_PULLED] already sent", + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + res, err := NewTaskStateChangeEvent(tc.task, tc.reason) + if tc.expectedError == "" { + require.NoError(t, err) + tc.expected.Task = tc.task + assert.Equal(t, tc.expected, res) + } else { + assert.EqualError(t, err, tc.expectedError) + } + }) + } +} + +func TestNewContainerStateChangeEvent(t *testing.T) { + tcs := []struct { + name string + task *apitask.Task + reason string + expected ContainerStateChange + expectedError string + }{ + { + name: "internal containers are not reported", + task: &apitask.Task{ + Arn: "arn", + Containers: []*apicontainer.Container{ + {Name: "container", Type: apicontainer.ContainerCNIPause}, + }, + }, + 
expectedError: "should not send events for internal tasks or containers: container", + }, + { + name: "MANIFEST_PULLED state is reported if digest was resolved", + task: &apitask.Task{ + Arn: "arn", + Containers: []*apicontainer.Container{ + { + Name: "container", + ImageDigest: "digest", + KnownStatusUnsafe: apicontainerstatus.ContainerManifestPulled, + }, + }, + }, + expected: ContainerStateChange{ + TaskArn: "arn", + ContainerName: "container", + Status: apicontainerstatus.ContainerManifestPulled, + ImageDigest: "digest", + }, + }, + { + name: "MANIFEST_PULLED state not is not reported if digest was not resolved", + task: &apitask.Task{ + Arn: "arn", + Containers: []*apicontainer.Container{ + { + Name: "container", + ImageDigest: "", + KnownStatusUnsafe: apicontainerstatus.ContainerManifestPulled, + }, + }, + }, + expectedError: "should not send events for internal tasks or containers:" + + " create container state change event api:" + + " no need to send MANIFEST_PULLED event" + + " as no resolved digests were found", + }, + { + name: "PULLED state is not reported", + task: &apitask.Task{ + Arn: "arn", + Containers: []*apicontainer.Container{ + { + Name: "container", + ImageDigest: "digest", + KnownStatusUnsafe: apicontainerstatus.ContainerPulled, + }, + }, + }, + expectedError: "should not send events for internal tasks or containers:" + + " create container state change event api: " + + "status not recognized by ECS: PULLED", + }, + { + name: "RUNNING state is reported", + task: &apitask.Task{ + Arn: "arn", + Containers: []*apicontainer.Container{ + { + Name: "container", + ImageDigest: "digest", + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + }, + }, + expected: ContainerStateChange{ + TaskArn: "arn", + ContainerName: "container", + Status: apicontainerstatus.ContainerRunning, + ImageDigest: "digest", + }, + }, + { + name: "STOPPED state is reported", + task: &apitask.Task{ + Arn: "arn", + Containers: []*apicontainer.Container{ + { + Name: "container", + ImageDigest: "digest", + KnownStatusUnsafe: apicontainerstatus.ContainerStopped, + }, + }, + }, + reason: "container stopped", + expected: ContainerStateChange{ + TaskArn: "arn", + ContainerName: "container", + Status: apicontainerstatus.ContainerStopped, + ImageDigest: "digest", + Reason: "container stopped", + }, + }, + { + name: "already sent state is not reported again", + task: &apitask.Task{ + Arn: "arn", + Containers: []*apicontainer.Container{ + { + Name: "container", + ImageDigest: "digest", + KnownStatusUnsafe: apicontainerstatus.ContainerRunning, + SentStatusUnsafe: apicontainerstatus.ContainerRunning, + }, + }, + }, + expectedError: "should not send events for internal tasks or containers:" + + " create container state change event api:" + + " status [RUNNING] already sent for container container, task arn", + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + res, err := NewContainerStateChangeEvent(tc.task, tc.task.Containers[0], tc.reason) + if tc.expectedError == "" { + require.NoError(t, err) + tc.expected.Container = tc.task.Containers[0] + assert.Equal(t, tc.expected, res) + } else { + assert.EqualError(t, err, tc.expectedError) + } + }) + } +} + +func TestContainerStatusChangeStatus(t *testing.T) { + // Mapped status is ContainerStatusNone when container status is ContainerStatusNone + var containerStatus apicontainerstatus.ContainerStatus + assert.Equal(t, + containerStatusChangeStatus(containerStatus, apicontainerstatus.ContainerRunning), + 
apicontainerstatus.ContainerStatusNone) + assert.Equal(t, + containerStatusChangeStatus(containerStatus, apicontainerstatus.ContainerResourcesProvisioned), + apicontainerstatus.ContainerStatusNone) + + // Mapped status is ContainerManifestPulled when container status is ContainerManifestPulled + containerStatus = apicontainerstatus.ContainerManifestPulled + assert.Equal(t, + containerStatusChangeStatus(containerStatus, apicontainerstatus.ContainerRunning), + apicontainerstatus.ContainerManifestPulled) + assert.Equal(t, + containerStatusChangeStatus(containerStatus, apicontainerstatus.ContainerResourcesProvisioned), + apicontainerstatus.ContainerManifestPulled) + + // Mapped status is ContainerStatusNone when container status is ContainerPulled + containerStatus = apicontainerstatus.ContainerPulled + assert.Equal(t, + containerStatusChangeStatus(containerStatus, apicontainerstatus.ContainerRunning), + apicontainerstatus.ContainerStatusNone) + assert.Equal(t, + containerStatusChangeStatus(containerStatus, apicontainerstatus.ContainerResourcesProvisioned), + apicontainerstatus.ContainerStatusNone) + + // Mapped status is ContainerStatusNone when container status is ContainerCreated + containerStatus = apicontainerstatus.ContainerCreated + assert.Equal(t, + containerStatusChangeStatus(containerStatus, apicontainerstatus.ContainerRunning), + apicontainerstatus.ContainerStatusNone) + assert.Equal(t, + containerStatusChangeStatus(containerStatus, apicontainerstatus.ContainerResourcesProvisioned), + apicontainerstatus.ContainerStatusNone) + + containerStatus = apicontainerstatus.ContainerRunning + // Mapped status is ContainerRunning when container status is ContainerRunning + // and steady state is ContainerRunning + assert.Equal(t, + containerStatusChangeStatus(containerStatus, apicontainerstatus.ContainerRunning), + apicontainerstatus.ContainerRunning) + // Mapped status is ContainerStatusNone when container status is ContainerRunning + // and steady state is ContainerResourcesProvisioned + assert.Equal(t, + containerStatusChangeStatus(containerStatus, apicontainerstatus.ContainerResourcesProvisioned), + apicontainerstatus.ContainerStatusNone) + + containerStatus = apicontainerstatus.ContainerResourcesProvisioned + // Mapped status is ContainerRunning when container status is ContainerResourcesProvisioned + // and steady state is ContainerResourcesProvisioned + assert.Equal(t, + containerStatusChangeStatus(containerStatus, apicontainerstatus.ContainerResourcesProvisioned), + apicontainerstatus.ContainerRunning) + + // Mapped status is ContainerStopped when container status is ContainerStopped + containerStatus = apicontainerstatus.ContainerStopped + assert.Equal(t, + containerStatusChangeStatus(containerStatus, apicontainerstatus.ContainerRunning), + apicontainerstatus.ContainerStopped) + assert.Equal(t, + containerStatusChangeStatus(containerStatus, apicontainerstatus.ContainerResourcesProvisioned), + apicontainerstatus.ContainerStopped) +} + +func TestBuildContainerStateChangePayload(t *testing.T) { + tcs := []struct { + name string + change ContainerStateChange + expected *ecsmodel.ContainerStateChange + expectedError string + }{ + { + name: "fails when no container name", + change: ContainerStateChange{}, + expectedError: "container state change has no container name", + }, + { + name: "no result no error when container state is unsupported", + change: ContainerStateChange{ + ContainerName: "container", + Status: apicontainerstatus.ContainerStatusNone, + }, + expected: nil, + }, + { + name: 
"MANIFEST_PULLED state maps to PENDING", + change: ContainerStateChange{ + ContainerName: "container", + Container: &apicontainer.Container{}, + Status: apicontainerstatus.ContainerManifestPulled, + ImageDigest: "digest", + }, + expected: &ecsmodel.ContainerStateChange{ + ContainerName: aws.String("container"), + ImageDigest: aws.String("digest"), + NetworkBindings: []*ecs.NetworkBinding{}, + Status: aws.String("PENDING"), + }, + }, + { + name: "RUNNING maps to RUNNING", + change: ContainerStateChange{ + ContainerName: "container", + Container: &apicontainer.Container{}, + Status: apicontainerstatus.ContainerRunning, + ImageDigest: "digest", + RuntimeID: "runtimeid", + }, + expected: &ecsmodel.ContainerStateChange{ + ContainerName: aws.String("container"), + ImageDigest: aws.String("digest"), + RuntimeId: aws.String("runtimeid"), + NetworkBindings: []*ecs.NetworkBinding{}, + Status: aws.String("RUNNING"), + }, + }, + { + name: "STOPPED maps to STOPPED", + change: ContainerStateChange{ + ContainerName: "container", + Container: &apicontainer.Container{}, + Status: apicontainerstatus.ContainerStopped, + ImageDigest: "digest", + RuntimeID: "runtimeid", + ExitCode: aws.Int(1), + }, + expected: &ecsmodel.ContainerStateChange{ + ContainerName: aws.String("container"), + ImageDigest: aws.String("digest"), + RuntimeId: aws.String("runtimeid"), + ExitCode: aws.Int64(1), + NetworkBindings: []*ecs.NetworkBinding{}, + Status: aws.String("STOPPED"), + }, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + res, err := buildContainerStateChangePayload(tc.change) + if tc.expectedError == "" { + require.NoError(t, err) + assert.Equal(t, tc.expected, res) + } else { + assert.EqualError(t, err, tc.expectedError) + } + }) + } +} diff --git a/agent/api/task/task.go b/agent/api/task/task.go index c7f5b96b518..9ef69bd8987 100644 --- a/agent/api/task/task.go +++ b/agent/api/task/task.go @@ -3721,3 +3721,14 @@ func (task *Task) IsRunning() bool { return taskStatus == apitaskstatus.TaskRunning } + +// Checks if the task has at least one container with a successfully +// resolved image manifest digest. 
+func (task *Task) HasAContainerWithResolvedDigest() bool { + for _, c := range task.Containers { + if c.DigestResolved() { + return true + } + } + return false +} diff --git a/agent/api/task/task_test.go b/agent/api/task/task_test.go index b38e4630ea2..d331728b8e7 100644 --- a/agent/api/task/task_test.go +++ b/agent/api/task/task_test.go @@ -5431,3 +5431,21 @@ func TestIsManagedDaemonTask(t *testing.T) { }) } } + +func TestHasAContainerWithResolvedDigest(t *testing.T) { + t.Run("false if no containers with a resolved digest", func(t *testing.T) { + task := &Task{ + Containers: []*apicontainer.Container{{}}, + } + assert.False(t, task.HasAContainerWithResolvedDigest()) + }) + t.Run("true if there is a container with a resolved digest", func(t *testing.T) { + task := &Task{ + Containers: []*apicontainer.Container{ + {}, + {ImageDigest: "digest"}, + }, + } + assert.True(t, task.HasAContainerWithResolvedDigest()) + }) +} diff --git a/agent/engine/common_integ_test.go b/agent/engine/common_integ_test.go index 00d7b94d06f..eeac803a941 100644 --- a/agent/engine/common_integ_test.go +++ b/agent/engine/common_integ_test.go @@ -141,6 +141,20 @@ func loggerConfigIntegrationTest(logfile string) string { return config } +func verifyContainerManifestPulledStateChange(t *testing.T, taskEngine TaskEngine) { + stateChangeEvents := taskEngine.StateChangeEvents() + event := <-stateChangeEvents + assert.Equal(t, apicontainerstatus.ContainerManifestPulled, event.(api.ContainerStateChange).Status, + "Expected container to be at MANIFEST_PULLED state") +} + +func verifyTaskManifestPulledStateChange(t *testing.T, taskEngine TaskEngine) { + stateChangeEvents := taskEngine.StateChangeEvents() + event := <-stateChangeEvents + assert.Equal(t, apitaskstatus.TaskManifestPulled, event.(api.TaskStateChange).Status, + "Expected task to reach MANIFEST_PULLED state") +} + func verifyContainerRunningStateChange(t *testing.T, taskEngine TaskEngine) { stateChangeEvents := taskEngine.StateChangeEvents() event := <-stateChangeEvents @@ -148,6 +162,13 @@ func verifyContainerRunningStateChange(t *testing.T, taskEngine TaskEngine) { "Expected container to be RUNNING") } +func verifyTaskRunningStateChange(t *testing.T, taskEngine TaskEngine) { + stateChangeEvents := taskEngine.StateChangeEvents() + event := <-stateChangeEvents + assert.Equal(t, apitaskstatus.TaskRunning, event.(api.TaskStateChange).Status, + "Expected task to be RUNNING") +} + func verifyContainerRunningStateChangeWithRuntimeID(t *testing.T, taskEngine TaskEngine) { stateChangeEvents := taskEngine.StateChangeEvents() event := <-stateChangeEvents diff --git a/agent/engine/common_test.go b/agent/engine/common_test.go index b816a123aca..b8bf2d93113 100644 --- a/agent/engine/common_test.go +++ b/agent/engine/common_test.go @@ -59,6 +59,7 @@ import ( const ( containerID = "containerID" waitTaskStateChangeDuration = 2 * time.Minute + testDigest = digest.Digest("sha256:ed6d2c43c8fbcd3eaa44c9dab6d94cb346234476230dc1681227aa72d07181ee") ) var ( @@ -111,6 +112,9 @@ func verifyTaskIsRunning(stateChangeEvents <-chan statechange.Event, task *apita if taskEvent.TaskARN != task.Arn { continue } + if taskEvent.Status == apitaskstatus.TaskManifestPulled { + continue + } if taskEvent.Status == apitaskstatus.TaskRunning { return nil } @@ -287,6 +291,7 @@ func addTaskToEngine(t *testing.T, assert.NoError(t, err) taskEngine.AddTask(sleepTask) + waitForManifestPulledEvents(t, taskEngine.StateChangeEvents()) waitForRunningEvents(t, taskEngine.StateChangeEvents()) // Wait for all events 
to be consumed prior to moving it towards stopped; we // don't want to race the below with these or we'll end up with the "going @@ -321,6 +326,18 @@ func waitForRunningEvents(t *testing.T, stateChangeEvents <-chan statechange.Eve } } +// waitForManifestPulledEvents waits for a task to emit 'MANIFEST_PULLED' events for a container +// and the task +func waitForManifestPulledEvents(t *testing.T, stateChangeEvents <-chan statechange.Event) { + event := <-stateChangeEvents + assert.Equal(t, apicontainerstatus.ContainerManifestPulled, event.(api.ContainerStateChange).Status, + "Expected MANIFEST_PULLED state to be emitted for the container") + + event = <-stateChangeEvents + assert.Equal(t, apitaskstatus.TaskManifestPulled, event.(api.TaskStateChange).Status, + "Expected MANIFEST_PULLED state to be emitted for the task") +} + // waitForStopEvents waits for a task to emit 'STOPPED' events for a container // and the task func waitForStopEvents(t *testing.T, stateChangeEvents <-chan statechange.Event, verifyExitCode, execEnabled bool) { diff --git a/agent/engine/docker_task_engine.go b/agent/engine/docker_task_engine.go index 9c2136ff732..870d0cedd9c 100644 --- a/agent/engine/docker_task_engine.go +++ b/agent/engine/docker_task_engine.go @@ -981,7 +981,7 @@ func (engine *DockerTaskEngine) EmitTaskEvent(task *apitask.Task, reason string) event, err := api.NewTaskStateChangeEvent(task, reason) if err != nil { if _, ok := err.(api.ErrShouldNotSendEvent); ok { - logger.Debug(err.Error()) + logger.Debug(err.Error(), logger.Fields{field.TaskID: task.GetID()}) } else { logger.Error("Unable to create task state change event", logger.Fields{ field.TaskID: task.GetID(), @@ -1241,8 +1241,14 @@ func (engine *DockerTaskEngine) GetDaemonManagers() map[string]dm.DaemonManager func (engine *DockerTaskEngine) pullContainerManifest( task *apitask.Task, container *apicontainer.Container, ) dockerapi.DockerContainerMetadata { - if container.IsInternal() { - // internal containers are not in-scope of digest resolution + if !container.DigestResolutionRequired() { + // Digest resolution not required + // (internal container or already has digest in image reference) + logger.Info("Digest resolution not required", logger.Fields{ + field.TaskARN: task.Arn, + field.ContainerName: container.Name, + field.Image: container.Image, + }) return dockerapi.DockerContainerMetadata{} } // AppNet Agent container image is managed at start up so it is not in-scope for digest resolution. @@ -1252,17 +1258,7 @@ func (engine *DockerTaskEngine) pullContainerManifest( } var imageManifestDigest digest.Digest - digestFromPayload := referenceutil.GetDigestFromImageRef(container.Image) - if digestFromPayload != "" { - // Digest available in task payload - imageManifestDigest = digestFromPayload - logger.Info("Found image manifest digest in task payload", logger.Fields{ - field.TaskARN: task.Arn, - field.ContainerName: container.Name, - field.Image: container.Image, - field.ImageDigest: imageManifestDigest.String(), - }) - } else if !engine.imagePullRequired(engine.cfg.ImagePullBehavior, container, task.GetID()) { + if !engine.imagePullRequired(engine.cfg.ImagePullBehavior, container, task.GetID()) { // Image pull is not required for the container so we will use a locally available // image for the container. Get digest from a locally available image. 
imgInspect, err := engine.client.InspectImage(container.Image) diff --git a/agent/engine/docker_task_engine_linux_test.go b/agent/engine/docker_task_engine_linux_test.go index 759649a0859..abea4639bc6 100644 --- a/agent/engine/docker_task_engine_linux_test.go +++ b/agent/engine/docker_task_engine_linux_test.go @@ -58,6 +58,7 @@ import ( "github.com/aws/amazon-ecs-agent/ecs-agent/credentials" "github.com/aws/amazon-ecs-agent/ecs-agent/netlib/model/appmesh" ni "github.com/aws/amazon-ecs-agent/ecs-agent/netlib/model/networkinterface" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/aws/aws-sdk-go/aws" cniTypesCurrent "github.com/containernetworking/cni/pkg/types/100" @@ -120,6 +121,7 @@ func TestResourceContainerProgression(t *testing.T) { // Mock versioned docker client to be used for transition to MANIFEST_PULLED state manifestPullClient := mock_dockerapi.NewMockDockerClient(ctrl) + expectedCanonicalRef := sleepContainer.Image + "@" + testDigest.String() // Hierarchical memory accounting is always enabled in CgroupV2 and no controller file exists to configure it if config.CgroupV2 { gomock.InOrder( @@ -130,8 +132,15 @@ func TestResourceContainerProgression(t *testing.T) { client.EXPECT().WithVersion(dockerclient.Version_1_35).Return(manifestPullClient, nil), manifestPullClient.EXPECT(). PullImageManifest(gomock.Any(), sleepContainer.Image, nil). - Return(registry.DistributionInspect{}, nil), - client.EXPECT().PullImage(gomock.Any(), sleepContainer.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}), + Return(registry.DistributionInspect{ + Descriptor: ocispec.Descriptor{Digest: testDigest}, + }, nil), + client.EXPECT(). + PullImage(gomock.Any(), expectedCanonicalRef, nil, gomock.Any()). + Return(dockerapi.DockerContainerMetadata{}), + client.EXPECT(). + TagImage(gomock.Any(), expectedCanonicalRef, sleepContainer.Image). + Return(nil), imageManager.EXPECT().RecordContainerReference(sleepContainer).Return(nil), imageManager.EXPECT().GetImageStateFromImageName(sleepContainer.Image).Return(nil, false), client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil), @@ -165,8 +174,15 @@ func TestResourceContainerProgression(t *testing.T) { client.EXPECT().WithVersion(dockerclient.Version_1_35).Return(manifestPullClient, nil), manifestPullClient.EXPECT(). PullImageManifest(gomock.Any(), sleepContainer.Image, nil). - Return(registry.DistributionInspect{}, nil), - client.EXPECT().PullImage(gomock.Any(), sleepContainer.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}), + Return(registry.DistributionInspect{ + Descriptor: ocispec.Descriptor{Digest: testDigest}, + }, nil), + client.EXPECT(). + PullImage(gomock.Any(), expectedCanonicalRef, nil, gomock.Any()). + Return(dockerapi.DockerContainerMetadata{}), + client.EXPECT(). + TagImage(gomock.Any(), expectedCanonicalRef, sleepContainer.Image). 
+ Return(nil), imageManager.EXPECT().RecordContainerReference(sleepContainer).Return(nil), imageManager.EXPECT().GetImageStateFromImageName(sleepContainer.Image).Return(nil, false), client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil), @@ -313,7 +329,7 @@ func TestResourceContainerProgressionFailure(t *testing.T) { sleepContainer := sleepTask.Containers[0] sleepContainer.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet) - sleepContainer.BuildResourceDependency("cgroup", resourcestatus.ResourceCreated, apicontainerstatus.ContainerPulled) + sleepContainer.BuildResourceDependency("cgroup", resourcestatus.ResourceCreated, apicontainerstatus.ContainerManifestPulled) mockControl := mock_control.NewMockControl(ctrl) taskID := sleepTask.GetID() @@ -335,14 +351,6 @@ func TestResourceContainerProgressionFailure(t *testing.T) { ) mockTime.EXPECT().Now().Return(time.Now()).AnyTimes() - // Container might progress to MANIFEST_PULLED state (there is a race condition) - manifestPullClient := mock_dockerapi.NewMockDockerClient(ctrl) - client.EXPECT().WithVersion(dockerclient.Version_1_35).AnyTimes().Return(manifestPullClient, nil) - manifestPullClient.EXPECT(). - PullImageManifest(gomock.Any(), sleepContainer.Image, nil). - AnyTimes(). - Return(registry.DistributionInspect{}, nil) - err := taskEngine.Init(ctx) assert.NoError(t, err) @@ -702,9 +710,15 @@ func TestTaskWithSteadyStateResourcesProvisioned(t *testing.T) { client.EXPECT().WithVersion(dockerclient.Version_1_35).Return(manifestPullClient, nil) manifestPullClient.EXPECT(). PullImageManifest(gomock.Any(), sleepContainer.Image, sleepContainer.RegistryAuthentication). - Return(registry.DistributionInspect{}, nil) - - client.EXPECT().PullImage(gomock.Any(), sleepContainer.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}) + Return(registry.DistributionInspect{Descriptor: ocispec.Descriptor{Digest: testDigest}}, nil) + + expectedCanonicalRef := sleepContainer.Image + "@" + testDigest.String() + client.EXPECT(). + PullImage(gomock.Any(), expectedCanonicalRef, nil, gomock.Any()). + Return(dockerapi.DockerContainerMetadata{}) + client.EXPECT(). + TagImage(gomock.Any(), expectedCanonicalRef, sleepContainer.Image). + Return(nil) imageManager.EXPECT().RecordContainerReference(sleepContainer).Return(nil) imageManager.EXPECT().GetImageStateFromImageName(sleepContainer.Image).Return(nil, false) diff --git a/agent/engine/docker_task_engine_test.go b/agent/engine/docker_task_engine_test.go index d4607dc24dd..852b3891b29 100644 --- a/agent/engine/docker_task_engine_test.go +++ b/agent/engine/docker_task_engine_test.go @@ -451,8 +451,15 @@ func TestStartTimeoutThenStart(t *testing.T) { client.EXPECT().WithVersion(dockerclient.Version_1_35).Return(manifestPullClient, nil) manifestPullClient.EXPECT(). PullImageManifest(gomock.Any(), container.Image, container.RegistryAuthentication). - Return(registry.DistributionInspect{}, nil) - client.EXPECT().PullImage(gomock.Any(), container.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}) + Return(registry.DistributionInspect{ + Descriptor: ocispec.Descriptor{Digest: testDigest}, + }, nil) + client.EXPECT(). + PullImage(gomock.Any(), container.Image+"@"+testDigest.String(), nil, gomock.Any()). + Return(dockerapi.DockerContainerMetadata{}) + client.EXPECT(). + TagImage(gomock.Any(), container.Image+"@"+testDigest.String(), container.Image). 
+ Return(nil) imageManager.EXPECT().RecordContainerReference(container) imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false) @@ -476,6 +483,7 @@ func TestStartTimeoutThenStart(t *testing.T) { assert.NoError(t, err) stateChangeEvents := taskEngine.StateChangeEvents() taskEngine.AddTask(sleepTask) + waitForManifestPulledEvents(t, taskEngine.StateChangeEvents()) waitForStopEvents(t, taskEngine.StateChangeEvents(), false, false) // Now surprise surprise, it actually did start! @@ -539,6 +547,7 @@ func TestSteadyStatePoll(t *testing.T) { err := taskEngine.Init(ctx) // start the task engine assert.NoError(t, err) taskEngine.AddTask(sleepTask) // actually add the task we created + waitForManifestPulledEvents(t, taskEngine.StateChangeEvents()) waitForRunningEvents(t, taskEngine.StateChangeEvents()) containerMap, ok := taskEngine.(*DockerTaskEngine).State().ContainerMapByArn(sleepTask.Arn) assert.True(t, ok) @@ -798,8 +807,14 @@ func TestTaskTransitionWhenStopContainerTimesout(t *testing.T) { client.EXPECT().WithVersion(dockerclient.Version_1_35).Return(manifestPullClient, nil) manifestPullClient.EXPECT(). PullImageManifest(gomock.Any(), container.Image, container.RegistryAuthentication). - Return(registry.DistributionInspect{}, nil) - client.EXPECT().PullImage(gomock.Any(), container.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}) + Return(registry.DistributionInspect{Descriptor: ocispec.Descriptor{Digest: testDigest}}, nil) + expectedCanonicalRef := container.Image + "@" + testDigest.String() + client.EXPECT(). + PullImage(gomock.Any(), expectedCanonicalRef, nil, gomock.Any()). + Return(dockerapi.DockerContainerMetadata{}) + client.EXPECT(). + TagImage(gomock.Any(), expectedCanonicalRef, container.Image). + Return(nil) imageManager.EXPECT().RecordContainerReference(container) imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false) client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil) @@ -832,6 +847,7 @@ func TestTaskTransitionWhenStopContainerTimesout(t *testing.T) { go taskEngine.AddTask(sleepTask) // wait for task running + waitForManifestPulledEvents(t, taskEngine.StateChangeEvents()) waitForRunningEvents(t, taskEngine.StateChangeEvents()) // Set the task desired status to be stopped and StopContainer will be called updateSleepTask := testdata.LoadTask("sleep5") @@ -862,13 +878,21 @@ func TestTaskTransitionWhenStopContainerReturnsUnretriableError(t *testing.T) { containerEventsWG := sync.WaitGroup{} manifestPullClient := mock_dockerapi.NewMockDockerClient(ctrl) for _, container := range sleepTask.Containers { + expectedCanonicalRef := container.Image + "@" + testDigest.String() gomock.InOrder( imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes(), client.EXPECT().WithVersion(dockerclient.Version_1_35).Return(manifestPullClient, nil), manifestPullClient.EXPECT(). PullImageManifest(gomock.Any(), container.Image, container.RegistryAuthentication). - Return(registry.DistributionInspect{}, nil), - client.EXPECT().PullImage(gomock.Any(), container.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}), + Return( + registry.DistributionInspect{Descriptor: ocispec.Descriptor{Digest: testDigest}}, + nil), + client.EXPECT(). + PullImage(gomock.Any(), expectedCanonicalRef, nil, gomock.Any()). + Return(dockerapi.DockerContainerMetadata{}), + client.EXPECT(). + TagImage(gomock.Any(), expectedCanonicalRef, container.Image). 
+ Return(nil), imageManager.EXPECT().RecordContainerReference(container), imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false), client.EXPECT().APIVersion().Return(defaultDockerClientAPIVersion, nil), @@ -912,6 +936,7 @@ func TestTaskTransitionWhenStopContainerReturnsUnretriableError(t *testing.T) { go taskEngine.AddTask(sleepTask) // wait for task running + waitForManifestPulledEvents(t, taskEngine.StateChangeEvents()) waitForRunningEvents(t, taskEngine.StateChangeEvents()) containerEventsWG.Wait() // Set the task desired status to be stopped and StopContainer will be called @@ -943,13 +968,19 @@ func TestTaskTransitionWhenStopContainerReturnsTransientErrorBeforeSucceeding(t } manifestPullClient := mock_dockerapi.NewMockDockerClient(ctrl) for _, container := range sleepTask.Containers { + expectedCanonicalRef := container.Image + "@" + testDigest.String() gomock.InOrder( imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes(), client.EXPECT().WithVersion(dockerclient.Version_1_35).Return(manifestPullClient, nil), manifestPullClient.EXPECT(). PullImageManifest(gomock.Any(), container.Image, container.RegistryAuthentication). - Return(registry.DistributionInspect{}, nil), - client.EXPECT().PullImage(gomock.Any(), container.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}), + Return(registry.DistributionInspect{Descriptor: ocispec.Descriptor{Digest: testDigest}}, nil), + client.EXPECT(). + PullImage(gomock.Any(), expectedCanonicalRef, nil, gomock.Any()). + Return(dockerapi.DockerContainerMetadata{}), + client.EXPECT(). + TagImage(gomock.Any(), expectedCanonicalRef, container.Image). + Return(nil), imageManager.EXPECT().RecordContainerReference(container), imageManager.EXPECT().GetImageStateFromImageName(gomock.Any()).Return(nil, false), // Simulate successful create container @@ -974,6 +1005,7 @@ func TestTaskTransitionWhenStopContainerReturnsTransientErrorBeforeSucceeding(t go taskEngine.AddTask(sleepTask) // wait for task running + waitForManifestPulledEvents(t, taskEngine.StateChangeEvents()) waitForRunningEvents(t, taskEngine.StateChangeEvents()) // Set the task desired status to be stopped and StopContainer will be called updateSleepTask := testdata.LoadTask("sleep5") @@ -2437,10 +2469,13 @@ func TestContainerProgressParallize(t *testing.T) { taskEngine.Init(ctx) taskEngine.AddTask(testTask) - // Expect the fast pulled container to be running firs + // Expect the fast pulled container to be running first fastPullContainerRunning := false for event := range stateChangeEvents { containerEvent, ok := event.(api.ContainerStateChange) + if ok && containerEvent.Status == apicontainerstatus.ContainerManifestPulled { + continue + } if ok && containerEvent.Status == apicontainerstatus.ContainerRunning { if containerEvent.ContainerName == fastPullImage { fastPullContainerRunning = true @@ -2453,6 +2488,9 @@ func TestContainerProgressParallize(t *testing.T) { } taskEvent, ok := event.(api.TaskStateChange) + if ok && taskEvent.Status == apitaskstatus.TaskManifestPulled { + continue + } if ok && taskEvent.Status == apitaskstatus.TaskRunning { break } @@ -4029,9 +4067,8 @@ func TestPullContainerManifest(t *testing.T) { containerName: "my-sc-container", }, { - name: "digest available in image reference", - image: "public.ecr.aws/library/alpine@" + testDigest.String(), - expectedDigest: testDigest.String(), + name: "digest is not resolved if already available in image reference", + image: "public.ecr.aws/library/alpine@" + testDigest.String(), 
}, { name: "image pull not required - image inspect fails", @@ -4458,6 +4495,10 @@ func TestManifestPullTaskShouldContinue(t *testing.T) { taskEngine.AddTask(task) // Wait for the task to reach RUNNING + if !tc.shouldPullWithoutCanonicalRef { + // MANIFEST_PULLED event is emitted only if image digest is resolved + waitForManifestPulledEvents(t, taskEngine.StateChangeEvents()) + } waitForRunningEvents(t, taskEngine.StateChangeEvents()) dockerEventsSent.Wait() diff --git a/agent/engine/docker_task_engine_windows_test.go b/agent/engine/docker_task_engine_windows_test.go index 8ba66772a90..85f826b9cb6 100644 --- a/agent/engine/docker_task_engine_windows_test.go +++ b/agent/engine/docker_task_engine_windows_test.go @@ -47,6 +47,7 @@ import ( dockercontainer "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/registry" "github.com/golang/mock/gomock" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -324,10 +325,18 @@ func TestTaskWithSteadyStateResourcesProvisioned(t *testing.T) { imageManager.EXPECT().AddAllImageStates(gomock.Any()).AnyTimes() manifestPullClient := mock_dockerapi.NewMockDockerClient(ctrl) client.EXPECT().WithVersion(dockerclient.Version_1_35).Return(manifestPullClient, nil) + expectedCanonicalRef := sleepContainer.Image + "@" + testDigest.String() manifestPullClient.EXPECT(). - PullImageManifest(gomock.Any(), gomock.Any(), gomock.Any()). - Return(registry.DistributionInspect{}, nil) - client.EXPECT().PullImage(gomock.Any(), sleepContainer.Image, nil, gomock.Any()).Return(dockerapi.DockerContainerMetadata{}) + PullImageManifest(gomock.Any(), sleepContainer.Image, nil). + Return(registry.DistributionInspect{ + Descriptor: ocispec.Descriptor{Digest: testDigest}, + }, nil) + client.EXPECT(). + PullImage(gomock.Any(), expectedCanonicalRef, nil, gomock.Any()). + Return(dockerapi.DockerContainerMetadata{}) + client.EXPECT(). + TagImage(gomock.Any(), expectedCanonicalRef, sleepContainer.Image). 
+ Return(nil) imageManager.EXPECT().RecordContainerReference(sleepContainer).Return(nil) imageManager.EXPECT().GetImageStateFromImageName(sleepContainer.Image).Return(nil, false) diff --git a/agent/engine/engine_integ_test.go b/agent/engine/engine_integ_test.go index 905b2adff82..ae50b469f76 100644 --- a/agent/engine/engine_integ_test.go +++ b/agent/engine/engine_integ_test.go @@ -81,13 +81,6 @@ func setupWithState(t *testing.T, state dockerstate.TaskEngineState) (TaskEngine return setup(defaultTestConfigIntegTest(), state, t) } -func verifyTaskRunningStateChange(t *testing.T, taskEngine TaskEngine) { - stateChangeEvents := taskEngine.StateChangeEvents() - event := <-stateChangeEvents - assert.Equal(t, event.(api.TaskStateChange).Status, apitaskstatus.TaskRunning, - "Expected task to be RUNNING") -} - func verifyTaskRunningStateChangeWithRuntimeID(t *testing.T, taskEngine TaskEngine) { stateChangeEvents := taskEngine.StateChangeEvents() event := <-stateChangeEvents @@ -207,6 +200,8 @@ func TestHostVolumeMount(t *testing.T) { go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) verifyContainerStoppedStateChange(t, taskEngine) @@ -234,6 +229,8 @@ func TestSweepContainer(t *testing.T) { go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) verifyContainerStoppedStateChange(t, taskEngine) @@ -276,6 +273,8 @@ func TestStartStopWithCredentials(t *testing.T) { go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) verifyContainerStoppedStateChange(t, taskEngine) @@ -295,6 +294,8 @@ func TestStartStopWithRuntimeID(t *testing.T) { testTask := createTestTask("testTaskWithContainerID") go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChangeWithRuntimeID(t, taskEngine) verifyTaskRunningStateChangeWithRuntimeID(t, taskEngine) verifyContainerStoppedStateChangeWithRuntimeID(t, taskEngine) @@ -329,6 +330,8 @@ func TestContainerHealthCheck(t *testing.T) { go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskIsRunning(stateChangeEvents, testTask) @@ -354,6 +357,8 @@ func TestEngineSynchronize(t *testing.T) { // Start a task go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) // Record the container information @@ -436,6 +441,8 @@ func TestLabels(t *testing.T) { "label1":"" }}`)} go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) @@ -478,6 +485,8 @@ func TestLogDriverOptions(t *testing.T) { } }}`)} go taskEngine.AddTask(testTask) + 
verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) @@ -526,6 +535,8 @@ func testNetworkMode(t *testing.T, networkMode string) { HostConfig: aws.String(fmt.Sprintf(`{"NetworkMode":"%s"}`, networkMode))} go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) @@ -570,6 +581,8 @@ func TestTaskCleanup(t *testing.T) { testTask := createTestTask(testArn) go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) @@ -631,6 +644,13 @@ func TestManifestPulledDoesNotDependOnContainerOrdering(t *testing.T) { // Start the task and wait for first container to start running go taskEngine.AddTask(task) + + // Both containers and the task should reach MANIFEST_PULLED state and emit events for it + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) + + // The first container should start running verifyContainerRunningStateChange(t, taskEngine) // The first container should be in RUNNING state @@ -664,16 +684,18 @@ func TestPullContainerManifestInteg(t *testing.T) { config.ImagePullOnceBehavior, config.ImagePullPreferCachedBehavior, } tcs := []struct { - name string - image string - setConfig func(c *config.Config) - imagePullBehaviors []config.ImagePullBehaviorType - assertError func(t *testing.T, err error) + name string + image string + setConfig func(c *config.Config) + imagePullBehaviors []config.ImagePullBehaviorType + digestResolutionNotNeeded bool + assertError func(t *testing.T, err error) }{ { - name: "digest available in image reference", - image: "ubuntu@sha256:c3839dd800b9eb7603340509769c43e146a74c63dca3045a8e7dc8ee07e53966", - imagePullBehaviors: allPullBehaviors, + name: "digest available in image reference", + image: "ubuntu@sha256:c3839dd800b9eb7603340509769c43e146a74c63dca3045a8e7dc8ee07e53966", + digestResolutionNotNeeded: true, + imagePullBehaviors: allPullBehaviors, }, { name: "digest can be resolved from explicit tag", @@ -724,7 +746,11 @@ func TestPullContainerManifestInteg(t *testing.T) { res := taskEngine.(*DockerTaskEngine).pullContainerManifest(task, container) if tc.assertError == nil { require.NoError(t, res.Error) - assert.NotEmpty(t, container.GetImageDigest()) + if tc.digestResolutionNotNeeded { + assert.Empty(t, container.GetImageDigest()) + } else { + assert.NotEmpty(t, container.GetImageDigest()) + } } else { tc.assertError(t, res.Error) } @@ -916,7 +942,7 @@ func TestImageWithDigestInteg(t *testing.T) { // Start the task go taskEngine.AddTask(task) - // The task should run + // The task should run. No MANIFEST_PULLED events expected. 
verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) assert.Equal(t, imageDigest, container.GetImageDigest()) diff --git a/agent/engine/engine_sudo_linux_integ_test.go b/agent/engine/engine_sudo_linux_integ_test.go index decf3334aa2..6a41960eafb 100644 --- a/agent/engine/engine_sudo_linux_integ_test.go +++ b/agent/engine/engine_sudo_linux_integ_test.go @@ -47,7 +47,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/aws/amazon-ecs-agent/agent/api" apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" apitask "github.com/aws/amazon-ecs-agent/agent/api/task" "github.com/aws/amazon-ecs-agent/agent/config" @@ -130,6 +129,9 @@ func TestStartStopWithCgroup(t *testing.T) { } go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) + verifyContainerRunningStateChange(t, taskEngine) verifyTaskIsRunning(stateChangeEvents, testTask) @@ -167,6 +169,8 @@ func TestLocalHostVolumeMount(t *testing.T) { stateChangeEvents := taskEngine.StateChangeEvents() go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskIsRunning(stateChangeEvents, testTask) verifyContainerStoppedStateChange(t, taskEngine) @@ -434,6 +438,8 @@ func TestExecCommandAgent(t *testing.T) { go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) @@ -526,6 +532,8 @@ func TestManagedAgentEvent(t *testing.T) { go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) @@ -794,13 +802,6 @@ func killMockExecCommandAgent(t *testing.T, client *sdkClient.Client, containerI require.NoError(t, err) } -func verifyTaskRunningStateChange(t *testing.T, taskEngine TaskEngine) { - stateChangeEvents := taskEngine.StateChangeEvents() - event := <-stateChangeEvents - assert.Equal(t, event.(api.TaskStateChange).Status, apitaskstatus.TaskRunning, - "Expected task to be RUNNING") -} - func TestGMSATaskFile(t *testing.T) { t.Setenv("ECS_GMSA_SUPPORTED", "True") t.Setenv("ZZZ_SKIP_DOMAIN_JOIN_CHECK_NOT_SUPPORTED_IN_PRODUCTION", "True") diff --git a/agent/engine/engine_unix_integ_test.go b/agent/engine/engine_unix_integ_test.go index 57d2a40eea0..e6f743051e7 100644 --- a/agent/engine/engine_unix_integ_test.go +++ b/agent/engine/engine_unix_integ_test.go @@ -269,6 +269,8 @@ func TestStartStopUnpulledImage(t *testing.T) { testTask := createTestTask("testStartUnpulled") go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) verifyContainerStoppedStateChange(t, taskEngine) @@ -449,6 +451,10 @@ func TestDynamicPortForward(t *testing.T) { go taskEngine.AddTask(testTask) event := <-stateChangeEvents + require.Equal(t, apicontainerstatus.ContainerManifestPulled, event.(api.ContainerStateChange).Status, "Expected container to reach MANIFEST_PULLED state") + event = <-stateChangeEvents + require.Equal(t, 
apitaskstatus.TaskManifestPulled, event.(api.TaskStateChange).Status, "Expected task to reach MANIFEST_PULLED state") + event = <-stateChangeEvents require.Equal(t, event.(api.ContainerStateChange).Status, apicontainerstatus.ContainerRunning, "Expected container to be RUNNING") portBindings := event.(api.ContainerStateChange).PortBindings @@ -504,6 +510,10 @@ func TestMultipleDynamicPortForward(t *testing.T) { go taskEngine.AddTask(testTask) event := <-stateChangeEvents + require.Equal(t, apicontainerstatus.ContainerManifestPulled, event.(api.ContainerStateChange).Status, "Expected container to reach MANIFEST_PULLED state") + event = <-stateChangeEvents + require.Equal(t, apitaskstatus.TaskManifestPulled, event.(api.TaskStateChange).Status, "Expected task to reach MANIFEST_PULLED state") + event = <-stateChangeEvents require.Equal(t, event.(api.ContainerStateChange).Status, apicontainerstatus.ContainerRunning, "Expected container to be RUNNING") portBindings := event.(api.ContainerStateChange).PortBindings @@ -699,6 +709,8 @@ func TestInitOOMEvent(t *testing.T) { go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) @@ -753,6 +765,8 @@ func TestSignalEvent(t *testing.T) { go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) @@ -814,6 +828,8 @@ func TestDockerStopTimeout(t *testing.T) { go dockerTaskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) @@ -840,6 +856,8 @@ func TestStartStopWithSecurityOptionNoNewPrivileges(t *testing.T) { go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) @@ -883,6 +901,8 @@ func TestSwapConfigurationTask(t *testing.T) { testTask.Containers[0].DockerConfig = apicontainer.DockerConfig{HostConfig: aws.String(`{"MemorySwap":314572800, "MemorySwappiness":90}`)} go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) @@ -930,6 +950,8 @@ func TestPerContainerStopTimeout(t *testing.T) { go dockerTaskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) @@ -961,6 +983,8 @@ func TestMemoryOverCommit(t *testing.T) { "MemoryReservation": 52428800 }`)} go taskEngine.AddTask(testTask) + verifyContainerManifestPulledStateChange(t, taskEngine) + verifyTaskManifestPulledStateChange(t, taskEngine) verifyContainerRunningStateChange(t, taskEngine) verifyTaskRunningStateChange(t, taskEngine) @@ -1021,6 +1045,8 @@ func TestFluentdTag(t *testing.T) { SourceVolume: "logs"}} testTaskFleuntdDriver.Containers[0].Ports = []apicontainer.PortBinding{{ContainerPort: 24224, HostPort: 24224}} go 
+    verifyContainerManifestPulledStateChange(t, taskEngine)
+    verifyTaskManifestPulledStateChange(t, taskEngine)
     verifyContainerRunningStateChange(t, taskEngine)
     verifyTaskRunningStateChange(t, taskEngine)
@@ -1041,6 +1067,8 @@ func TestFluentdTag(t *testing.T) {
     }}`)}
     go taskEngine.AddTask(testTaskFluentdLogTag)
+    verifyContainerManifestPulledStateChange(t, taskEngine)
+    verifyTaskManifestPulledStateChange(t, taskEngine)
     verifyContainerRunningStateChange(t, taskEngine)
     verifyTaskRunningStateChange(t, taskEngine)
@@ -1101,6 +1129,8 @@ func TestDockerExecAPI(t *testing.T) {
     finished := make(chan interface{})
     go func() {
         // Both containers should start
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyTaskManifestPulledStateChange(t, taskEngine)
         verifyContainerRunningStateChange(t, taskEngine)
         verifyTaskIsRunning(stateChangeEvents, testTask)
@@ -1176,9 +1206,13 @@ func TestHostResourceManagerTrickleQueue(t *testing.T) {
     // goroutine to verify task running order
     go func() {
         // Tasks go RUNNING in order
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyTaskManifestPulledStateChange(t, taskEngine)
         verifyContainerRunningStateChange(t, taskEngine)
         verifyTaskIsRunning(stateChangeEvents, tasks[0])
 
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyTaskManifestPulledStateChange(t, taskEngine)
         verifyContainerRunningStateChange(t, taskEngine)
         verifyTaskIsRunning(stateChangeEvents, tasks[1])
@@ -1186,6 +1220,8 @@ func TestHostResourceManagerTrickleQueue(t *testing.T) {
         verifyContainerStoppedStateChange(t, taskEngine)
         verifyTaskIsStopped(stateChangeEvents, tasks[0])
 
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyTaskManifestPulledStateChange(t, taskEngine)
         verifyContainerRunningStateChange(t, taskEngine)
         verifyTaskIsRunning(stateChangeEvents, tasks[2])
@@ -1264,9 +1300,13 @@ func TestHostResourceManagerResourceUtilization(t *testing.T) {
     go func() {
         // Tasks go RUNNING in order, 2nd task doesn't wait for 1st task
         // to transition to STOPPED as resources are available
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyTaskManifestPulledStateChange(t, taskEngine)
         verifyContainerRunningStateChange(t, taskEngine)
         verifyTaskIsRunning(stateChangeEvents, tasks[0])
 
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyTaskManifestPulledStateChange(t, taskEngine)
         verifyContainerRunningStateChange(t, taskEngine)
         verifyTaskIsRunning(stateChangeEvents, tasks[1])
@@ -1350,6 +1390,10 @@ func TestHostResourceManagerStopTaskNotBlockWaitingTasks(t *testing.T) {
     // goroutine to verify task running order and verify assertions
     go func() {
+        // First task goes to MANIFEST_PULLED
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyTaskManifestPulledStateChange(t, taskEngine)
+
         // 1st task goes to RUNNING
         verifyContainerRunningStateChange(t, taskEngine)
         verifyTaskIsRunning(stateChangeEvents, tasks[0])
@@ -1455,6 +1499,8 @@ func TestHostResourceManagerLaunchTypeBehavior(t *testing.T) {
     // goroutine to verify task running order and verify assertions
     go func() {
         // Task goes to RUNNING
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyTaskManifestPulledStateChange(t, taskEngine)
         verifyContainerRunningStateChange(t, taskEngine)
         verifyTaskIsRunning(stateChangeEvents, testTask)

diff --git a/agent/engine/ordering_integ_test.go b/agent/engine/ordering_integ_test.go
index 829dd6137ef..d4a2e008f56 100644
--- a/agent/engine/ordering_integ_test.go
+++ b/agent/engine/ordering_integ_test.go
@@ -75,6 +75,9 @@ func TestDependencyHealthCheck(t *testing.T) {
     finished := make(chan interface{})
     go func() {
         // Both containers should start
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyTaskManifestPulledStateChange(t, taskEngine)
         verifyContainerRunningStateChange(t, taskEngine)
         verifyContainerRunningStateChange(t, taskEngine)
         verifyTaskIsRunning(stateChangeEvents, testTask)
@@ -131,6 +134,11 @@ func TestDependencyComplete(t *testing.T) {
     finished := make(chan interface{})
     go func() {
+        // Both containers and the task should reach MANIFEST_PULLED regardless of ordering
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyTaskManifestPulledStateChange(t, taskEngine)
+
         // First container should run to completion and then exit
         verifyContainerRunningStateChange(t, taskEngine)
         verifyContainerStoppedStateChange(t, taskEngine)
@@ -186,6 +194,11 @@ func TestDependencyStart(t *testing.T) {
     finished := make(chan interface{})
     go func() {
+        // Both containers and the task should go to MANIFEST_PULLED state
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyTaskManifestPulledStateChange(t, taskEngine)
+
         // 'dependency' container should run first, followed by the 'parent' container
         verifySpecificContainerStateChange(t, taskEngine, "dependency", status.ContainerRunning)
         verifySpecificContainerStateChange(t, taskEngine, "parent", status.ContainerRunning)
@@ -245,6 +258,11 @@ func TestDependencySuccess(t *testing.T) {
     finished := make(chan interface{})
     go func() {
+        // All containers and the task should reach MANIFEST_PULLED
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyTaskManifestPulledStateChange(t, taskEngine)
+
         // First container should run to completion
         verifyContainerRunningStateChange(t, taskEngine)
         verifyContainerStoppedStateChange(t, taskEngine)
@@ -304,6 +322,11 @@ func TestDependencySuccessErrored(t *testing.T) {
     finished := make(chan interface{})
     go func() {
+        // Both containers and the task should reach MANIFEST_PULLED state
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyTaskManifestPulledStateChange(t, taskEngine)
+
         // First container should run to completion
         verifyContainerRunningStateChange(t, taskEngine)
         verifyContainerStoppedStateChange(t, taskEngine)
@@ -360,6 +383,12 @@ func TestDependencySuccessTimeout(t *testing.T) {
     finished := make(chan interface{})
     go func() {
+        // All containers and the task should reach MANIFEST_PULLED
+        for range testTask.Containers {
+            verifyContainerManifestPulledStateChange(t, taskEngine)
+        }
+        verifyTaskManifestPulledStateChange(t, taskEngine)
+
         // First container should run to completion
         verifyContainerRunningStateChange(t, taskEngine)
         verifyContainerStoppedStateChange(t, taskEngine)
@@ -423,6 +452,11 @@ func TestDependencyHealthyTimeout(t *testing.T) {
     finished := make(chan interface{})
     go func() {
+        // Both containers and the task should reach MANIFEST_PULLED
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyTaskManifestPulledStateChange(t, taskEngine)
+
         // First container should run to completion
         verifyContainerRunningStateChange(t, taskEngine)
         verifyContainerStoppedStateChange(t, taskEngine)
@@ -500,6 +534,11 @@ func TestShutdownOrder(t *testing.T) {
     finished := make(chan interface{})
     go func() {
         // Everything should first progress to running
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyTaskManifestPulledStateChange(t, taskEngine)
         verifyContainerRunningStateChange(t, taskEngine)
         verifyContainerRunningStateChange(t, taskEngine)
         verifyContainerRunningStateChange(t, taskEngine)
@@ -589,6 +628,12 @@ func TestMultipleContainerDependency(t *testing.T) {
     finished := make(chan interface{})
     go func() {
+        // All containers and the task should reach MANIFEST_PULLED regardless of dependency
+        for range testTask.Containers {
+            verifyContainerManifestPulledStateChange(t, taskEngine)
+        }
+        verifyTaskManifestPulledStateChange(t, taskEngine)
+
         // Only exit should first progress to running
         verifyContainerRunningStateChange(t, taskEngine)

diff --git a/agent/engine/ordering_integ_unix_test.go b/agent/engine/ordering_integ_unix_test.go
index 1878e4bdb54..6a45a37e45a 100644
--- a/agent/engine/ordering_integ_unix_test.go
+++ b/agent/engine/ordering_integ_unix_test.go
@@ -77,6 +77,11 @@ func TestGranularStopTimeout(t *testing.T) {
     finished := make(chan interface{})
     go func() {
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyContainerManifestPulledStateChange(t, taskEngine)
+        verifyTaskManifestPulledStateChange(t, taskEngine)
+
         verifyContainerRunningStateChange(t, taskEngine)
         verifyContainerRunningStateChange(t, taskEngine)
         verifyContainerRunningStateChange(t, taskEngine)
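The ordering tests above also call verifySpecificContainerStateChange, an existing helper in ordering_integ_test.go that this patch leaves untouched. A rough sketch of its assumed shape (the field names are taken from api.ContainerStateChange and the `status` package alias from the call sites; the actual helper may differ):

func verifySpecificContainerStateChange(t *testing.T, taskEngine TaskEngine,
    containerName string, expectedStatus status.ContainerStatus) {
    stateChangeEvents := taskEngine.StateChangeEvents()
    event := <-stateChangeEvents
    containerEvent := event.(api.ContainerStateChange)
    // Assumed: per-container events arrive in dependency order, so the next
    // container event should belong to the named container.
    assert.Equal(t, containerName, containerEvent.ContainerName)
    assert.Equal(t, expectedStatus, containerEvent.Status)
}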
diff --git a/agent/engine/task_manager.go b/agent/engine/task_manager.go
index 9f4703c6b5a..661e1f90d2c 100644
--- a/agent/engine/task_manager.go
+++ b/agent/engine/task_manager.go
@@ -604,6 +604,7 @@ func getContainerEventLogFields(c api.ContainerStateChange) logger.Fields {
     if c.Container != nil {
         f["KnownSent"] = c.Container.GetSentStatus().String()
+        f["KnownStatus"] = c.Container.GetKnownStatus()
     }
     return f
 }
@@ -618,7 +619,7 @@ func (mtask *managedTask) emitTaskEvent(task *apitask.Task, reason string) {
             logger.Critical("Failed to release resources after tast stopped", logger.Fields{field.TaskARN: mtask.Arn})
         }
     }
-    if !taskKnownStatus.BackendRecognized() {
+    if taskKnownStatus != apitaskstatus.TaskManifestPulled && !taskKnownStatus.BackendRecognized() {
         logger.Debug("Skipping event emission for task", logger.Fields{
             field.TaskID: mtask.GetID(),
             field.Error:  "status not recognized by ECS",
@@ -629,7 +630,7 @@ func (mtask *managedTask) emitTaskEvent(task *apitask.Task, reason string) {
     event, err := api.NewTaskStateChangeEvent(task, reason)
     if err != nil {
         if _, ok := err.(api.ErrShouldNotSendEvent); ok {
-            logger.Debug(err.Error())
+            logger.Debug(err.Error(), logger.Fields{field.TaskID: mtask.GetID()})
         } else {
             logger.Error("Skipping emitting event for task due to error", logger.Fields{
                 field.TaskID: mtask.GetID(),
@@ -698,7 +699,10 @@ func (mtask *managedTask) emitContainerEvent(task *apitask.Task, cont *apicontai
     event, err := api.NewContainerStateChangeEvent(task, cont, reason)
     if err != nil {
         if _, ok := err.(api.ErrShouldNotSendEvent); ok {
-            logger.Debug(err.Error())
+            logger.Debug(err.Error(), logger.Fields{
+                field.TaskID:    mtask.GetID(),
+                field.Container: cont.Name,
+            })
         } else {
             logger.Error("Skipping emitting event for container due to error", logger.Fields{
                 field.TaskID: mtask.GetID(),

diff --git a/agent/vendor/github.com/aws/amazon-ecs-agent/ecs-agent/api/container/status/containerstatus.go b/agent/vendor/github.com/aws/amazon-ecs-agent/ecs-agent/api/container/status/containerstatus.go
index 53686f01832..f579370ba5b 100644
--- a/agent/vendor/github.com/aws/amazon-ecs-agent/ecs-agent/api/container/status/containerstatus.go
+++ b/agent/vendor/github.com/aws/amazon-ecs-agent/ecs-agent/api/container/status/containerstatus.go
@@ -102,20 +102,6 @@ func (cs *ContainerStatus) ShouldReportToBackend(steadyStateStatus ContainerStat
     return *cs == steadyStateStatus || *cs == ContainerStopped
 }
 
-// BackendStatus maps the internal container status in the agent to that in the
-// backend
-func (cs *ContainerStatus) BackendStatus(steadyStateStatus ContainerStatus) ContainerStatus {
-    if *cs == steadyStateStatus {
-        return ContainerRunning
-    }
-
-    if *cs == ContainerStopped {
-        return ContainerStopped
-    }
-
-    return ContainerStatusNone
-}
-
 // BackendStatusString maps the internal container status in Agent to a backend recognized
 // status string.
 func (cs ContainerStatus) BackendStatusString() string {

diff --git a/ecs-agent/api/container/status/containerstatus.go b/ecs-agent/api/container/status/containerstatus.go
index 53686f01832..f579370ba5b 100644
--- a/ecs-agent/api/container/status/containerstatus.go
+++ b/ecs-agent/api/container/status/containerstatus.go
@@ -102,20 +102,6 @@ func (cs *ContainerStatus) ShouldReportToBackend(steadyStateStatus ContainerStat
     return *cs == steadyStateStatus || *cs == ContainerStopped
 }
 
-// BackendStatus maps the internal container status in the agent to that in the
-// backend
-func (cs *ContainerStatus) BackendStatus(steadyStateStatus ContainerStatus) ContainerStatus {
-    if *cs == steadyStateStatus {
-        return ContainerRunning
-    }
-
-    if *cs == ContainerStopped {
-        return ContainerStopped
-    }
-
-    return ContainerStatusNone
-}
-
 // BackendStatusString maps the internal container status in Agent to a backend recognized
 // status string.
 func (cs ContainerStatus) BackendStatusString() string {

diff --git a/ecs-agent/api/container/status/containerstatus_test.go b/ecs-agent/api/container/status/containerstatus_test.go
index 2fd78e09a25..0ab7a6a62af 100644
--- a/ecs-agent/api/container/status/containerstatus_test.go
+++ b/ecs-agent/api/container/status/containerstatus_test.go
@@ -64,46 +64,6 @@ func TestShouldReportToBackend(t *testing.T) {
 }
 
-func TestBackendStatus(t *testing.T) {
-    // BackendStatus is ContainerStatusNone when container status is ContainerStatusNone
-    var containerStatus ContainerStatus
-    assert.Equal(t, containerStatus.BackendStatus(ContainerRunning), ContainerStatusNone)
-    assert.Equal(t, containerStatus.BackendStatus(ContainerResourcesProvisioned), ContainerStatusNone)
-
-    // BackendStatus is still ContainerStatusNone when container status is ContainerManifestPulled
-    containerStatus = ContainerManifestPulled
-    assert.Equal(t, containerStatus.BackendStatus(ContainerRunning), ContainerStatusNone)
-    assert.Equal(t, containerStatus.BackendStatus(ContainerResourcesProvisioned), ContainerStatusNone)
-
-    // BackendStatus is still ContainerStatusNone when container status is ContainerPulled
-    containerStatus = ContainerPulled
-    assert.Equal(t, containerStatus.BackendStatus(ContainerRunning), ContainerStatusNone)
-    assert.Equal(t, containerStatus.BackendStatus(ContainerResourcesProvisioned), ContainerStatusNone)
-
-    // BackendStatus is still ContainerStatusNone when container status is ContainerCreated
-    containerStatus = ContainerCreated
-    assert.Equal(t, containerStatus.BackendStatus(ContainerRunning), ContainerStatusNone)
-    assert.Equal(t, containerStatus.BackendStatus(ContainerResourcesProvisioned), ContainerStatusNone)
-
-    containerStatus = ContainerRunning
-    // BackendStatus is ContainerRunning when container status is ContainerRunning
-    // and steady state is ContainerRunning
-    assert.Equal(t, containerStatus.BackendStatus(ContainerRunning), ContainerRunning)
-    // BackendStatus is still ContainerStatusNone when container status is ContainerRunning
-    // and steady state is ContainerResourcesProvisioned
-    assert.Equal(t, containerStatus.BackendStatus(ContainerResourcesProvisioned), ContainerStatusNone)
-
-    containerStatus = ContainerResourcesProvisioned
-    // BackendStatus is still ContainerRunning when container status is ContainerResourcesProvisioned
-    // and steady state is ContainerResourcesProvisioned
-    assert.Equal(t, containerStatus.BackendStatus(ContainerResourcesProvisioned), ContainerRunning)
-
-    // BackendStatus is ContainerStopped when container status is ContainerStopped
-    containerStatus = ContainerStopped
-    assert.Equal(t, containerStatus.BackendStatus(ContainerRunning), ContainerStopped)
-    assert.Equal(t, containerStatus.BackendStatus(ContainerResourcesProvisioned), ContainerStopped)
-}
-
 type testContainerStatus struct {
     SomeStatus ContainerStatus `json:"status"`
 }