From d7ab1cd23a8014ea69d54f3f64cafa49b9fb612c Mon Sep 17 00:00:00 2001 From: Thom Carlin Date: Tue, 6 Feb 2024 11:24:13 -0500 Subject: [PATCH 1/3] For RECEPTOR_KUBE_SUPPORT_RECONNECT, Update auto/default to true --- pkg/workceptor/kubernetes.go | 4 ++-- pkg/workceptor/kubernetes_test.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/workceptor/kubernetes.go b/pkg/workceptor/kubernetes.go index 44bcee497..a2d838f87 100644 --- a/pkg/workceptor/kubernetes.go +++ b/pkg/workceptor/kubernetes.go @@ -771,13 +771,13 @@ func shouldUseReconnect() bool { case "disabled": return false case "auto": - // continue + return true default: return false } } - return false + return true } func parseTime(s string) *time.Time { diff --git a/pkg/workceptor/kubernetes_test.go b/pkg/workceptor/kubernetes_test.go index b23ff4b92..23500ede9 100644 --- a/pkg/workceptor/kubernetes_test.go +++ b/pkg/workceptor/kubernetes_test.go @@ -18,7 +18,7 @@ func TestShouldUseReconnect(t *testing.T) { { name: "Positive (undefined) test", envValue: "", - want: false, + want: true, }, { name: "Enabled test", @@ -33,7 +33,7 @@ func TestShouldUseReconnect(t *testing.T) { { name: "Auto test", envValue: "auto", - want: false, + want: true, }, { name: "Default test", From e488962b54d771bf7d64a46329074609c83f721c Mon Sep 17 00:00:00 2001 From: Thom Carlin Date: Wed, 7 Feb 2024 11:04:38 -0500 Subject: [PATCH 2/3] Added cancel for all timeout contexts --- tests/functional/cli/cli_test.go | 24 ++- tests/functional/mesh/firewall_test.go | 6 +- tests/functional/mesh/mesh_test.go | 29 ++-- tests/functional/mesh/tls_test.go | 30 ++-- tests/functional/mesh/work_test.go | 211 +++++++++++++++++-------- tests/functional/mesh/work_utils.go | 6 +- 6 files changed, 204 insertions(+), 102 deletions(-) diff --git a/tests/functional/cli/cli_test.go b/tests/functional/cli/cli_test.go index c1481c9f5..5763c5795 100644 --- a/tests/functional/cli/cli_test.go +++ b/tests/functional/cli/cli_test.go @@ -59,8 
+59,10 @@ func TestListeners(t *testing.T) { defer cmd.Process.Wait() defer cmd.Process.Kill() - ctx, _ := context.WithTimeout(context.Background(), 2*time.Second) - success, err := utils.CheckUntilTimeoutWithErr(ctx, 10*time.Millisecond, func() (bool, error) { + ctx1, cancel1 := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel1() + + success, err := utils.CheckUntilTimeoutWithErr(ctx1, 10*time.Millisecond, func() (bool, error) { return ConfirmListening(cmd.Process.Pid, listenProto) }) if err != nil { @@ -116,8 +118,10 @@ func TestSSLListeners(t *testing.T) { return err == nil } - ctx, _ := context.WithTimeout(context.Background(), 2*time.Second) - success := utils.CheckUntilTimeout(ctx, 10*time.Millisecond, checkFunc) + ctx1, cancel1 := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel1() + + success := utils.CheckUntilTimeout(ctx1, 10*time.Millisecond, checkFunc) if !success { t.Fatalf("Timed out while waiting for tls backend to start:\n%s", receptorStdOut.String()) } @@ -190,8 +194,10 @@ func TestCostMap(t *testing.T) { defer cmd.Process.Wait() defer cmd.Process.Kill() - ctx, _ := context.WithTimeout(context.Background(), 2*time.Second) - success, err := utils.CheckUntilTimeoutWithErr(ctx, 10*time.Millisecond, func() (bool, error) { + ctx1, cancel1 := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel1() + + success, err := utils.CheckUntilTimeoutWithErr(ctx1, 10*time.Millisecond, func() (bool, error) { return ConfirmListening(cmd.Process.Pid, listenProto) }) if err != nil { @@ -238,8 +244,10 @@ func TestCosts(t *testing.T) { defer cmd.Process.Wait() defer cmd.Process.Kill() - ctx, _ := context.WithTimeout(context.Background(), 2*time.Second) - success, err := utils.CheckUntilTimeoutWithErr(ctx, 10*time.Millisecond, func() (bool, error) { + ctx1, cancel1 := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel1() + + success, err := utils.CheckUntilTimeoutWithErr(ctx1, 
10*time.Millisecond, func() (bool, error) { return ConfirmListening(cmd.Process.Pid, listenProto) }) if err != nil { diff --git a/tests/functional/mesh/firewall_test.go b/tests/functional/mesh/firewall_test.go index f4ad3d392..92c03d5e4 100644 --- a/tests/functional/mesh/firewall_test.go +++ b/tests/functional/mesh/firewall_test.go @@ -55,8 +55,10 @@ func TestFirewall(t *testing.T) { t.Fatal(err) } - ctx, _ := context.WithTimeout(context.Background(), 20*time.Second) - err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel1() + + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err) } diff --git a/tests/functional/mesh/mesh_test.go b/tests/functional/mesh/mesh_test.go index d89177152..02c6a9eaa 100644 --- a/tests/functional/mesh/mesh_test.go +++ b/tests/functional/mesh/mesh_test.go @@ -46,8 +46,10 @@ func TestMeshStartup(t *testing.T) { t.Fatal(err) } - ctx, _ := context.WithTimeout(context.Background(), 5*time.Minute) - err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel1() + + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err) } @@ -109,8 +111,10 @@ func TestTraceroute(t *testing.T) { t.Fatal(err) } - ctx, _ := context.WithTimeout(context.Background(), 60*time.Second) - err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel1() + + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err) } @@ -212,8 +216,9 @@ func TestMeshShutdown(t *testing.T) { t.Fatal(err) } - ctx, _ := context.WithTimeout(context.Background(), 60*time.Second) - err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel1() + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err) @@ -288,8 +293,10 @@ func TestCosts(t *testing.T) { defer m.WaitForShutdown() defer m.Destroy() - ctx, _ := context.WithTimeout(context.Background(), 60*time.Second) 
- err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel1() + + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err) } @@ -351,9 +358,11 @@ func TestDuplicateNodes(t *testing.T) { t.Fatal(err) } - ctx, _ := context.WithTimeout(context.Background(), 1*time.Minute) + ctx1, cancel1 := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel1() + sleepInterval := 100 * time.Millisecond - if !utils.CheckUntilTimeout(ctx, sleepInterval, func() bool { + if !utils.CheckUntilTimeout(ctx1, sleepInterval, func() bool { return strings.Contains(m.LogWriter.String(), "connected using a node ID we are already connected to") }) { t.Fatal("duplicate nodes were not expected to exist together") diff --git a/tests/functional/mesh/tls_test.go b/tests/functional/mesh/tls_test.go index 96c12c85f..11ddc4450 100644 --- a/tests/functional/mesh/tls_test.go +++ b/tests/functional/mesh/tls_test.go @@ -103,8 +103,10 @@ func TestTCPSSLConnections(t *testing.T) { defer m.WaitForShutdown() defer m.Destroy() - ctx, _ := context.WithTimeout(context.Background(), 20*time.Second) - err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel1() + + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err) } @@ -187,9 +189,11 @@ func TestTCPSSLClientAuthFailNoKey(t *testing.T) { defer m.WaitForShutdown() defer m.Destroy() - ctx, _ := context.WithTimeout(context.Background(), 5*time.Minute) + ctx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel1() + sleepInterval := 100 * time.Millisecond - if !utils.CheckUntilTimeout(ctx, sleepInterval, func() bool { + if !utils.CheckUntilTimeout(ctx1, sleepInterval, func() bool { linuxTLSError := strings.Contains(m.LogWriter.String(), "certificate signed by unknown authority") macTLSError := strings.Contains(m.LogWriter.String(), "certificate is not trusted") @@ -265,9 +269,11 @@ func 
TestTCPSSLClientAuthFailBadKey(t *testing.T) { defer m.WaitForShutdown() defer m.Destroy() - ctx, _ := context.WithTimeout(context.Background(), 5*time.Minute) + ctx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel1() + sleepInterval := 100 * time.Millisecond - if !utils.CheckUntilTimeout(ctx, sleepInterval, func() bool { + if !utils.CheckUntilTimeout(ctx1, sleepInterval, func() bool { linuxTLSError := strings.Contains(m.LogWriter.String(), "certificate signed by unknown authority") macTLSError := strings.Contains(m.LogWriter.String(), "certificate is not trusted") @@ -330,9 +336,11 @@ func TestTCPSSLServerAuthFailNoKey(t *testing.T) { defer m.WaitForShutdown() defer m.Destroy() - ctx, _ := context.WithTimeout(context.Background(), 5*time.Minute) + ctx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel1() + sleepInterval := 100 * time.Millisecond - if !utils.CheckUntilTimeout(ctx, sleepInterval, func() bool { + if !utils.CheckUntilTimeout(ctx1, sleepInterval, func() bool { return strings.Contains(m.LogWriter.String(), "first record does not look like a TLS handshake") }) { t.Fatal("Expected connection to fail but it succeeded") @@ -402,9 +410,11 @@ func TestTCPSSLServerAuthFailBadKey(t *testing.T) { defer m.WaitForShutdown() defer m.Destroy() - ctx, _ := context.WithTimeout(context.Background(), 5*time.Minute) + ctx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel1() + sleepInterval := 100 * time.Millisecond - if !utils.CheckUntilTimeout(ctx, sleepInterval, func() bool { + if !utils.CheckUntilTimeout(ctx1, sleepInterval, func() bool { linuxTLSError := strings.Contains(m.LogWriter.String(), "certificate signed by unknown authority") macTLSError := strings.Contains(m.LogWriter.String(), "certificate is not trusted") diff --git a/tests/functional/mesh/work_test.go b/tests/functional/mesh/work_test.go index e157f9e8d..cb86d9054 100644 --- 
a/tests/functional/mesh/work_test.go +++ b/tests/functional/mesh/work_test.go @@ -33,8 +33,10 @@ func TestWorkSubmitWithTLSClient(t *testing.T) { if err != nil { t.Fatal(err, m.DataDir) } - ctx, _ := context.WithTimeout(context.Background(), 60*time.Second) - err = controllers["node1"].AssertWorkSucceeded(ctx, unitID) + ctx1, cancel1 := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel1() + + err = controllers["node1"].AssertWorkSucceeded(ctx1, unitID) if err != nil { t.Fatal(err, m.DataDir) } @@ -66,8 +68,10 @@ func TestWorkSubmitWithIncorrectTLSClient(t *testing.T) { t.Fatal(err) } - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkFailed(ctx, unitID) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err = controllers["node1"].AssertWorkFailed(ctx1, unitID) if err != nil { t.Fatal(err) @@ -79,15 +83,19 @@ func TestWorkSubmitWithIncorrectTLSClient(t *testing.T) { t.Fatal(err) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkReleased(ctx, unitID) + ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel2() + + err = controllers["node1"].AssertWorkReleased(ctx2, unitID) if err != nil { t.Fatal(err) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = assertFilesReleased(ctx, nodes["node1"].GetDataDir(), "node1", unitID) + ctx3, cancel3 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel3() + + err = assertFilesReleased(ctx3, nodes["node1"].GetDataDir(), "node1", unitID) if err != nil { t.Fatal(err) @@ -119,8 +127,10 @@ func TestStartRemoteWorkWithTTL(t *testing.T) { if err != nil { t.Fatal(err) } - ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) - err = controllers["node1"].AssertWorkTimedOut(ctx, unitID) + ctx1, cancel1 := context.WithTimeout(context.Background(), 
10*time.Second) + defer cancel1() + + err = controllers["node1"].AssertWorkTimedOut(ctx1, unitID) if err != nil { t.Fatal(err) } @@ -128,13 +138,17 @@ func TestStartRemoteWorkWithTTL(t *testing.T) { if err != nil { t.Fatal(err) } - ctx, _ = context.WithTimeout(context.Background(), 5*time.Second) - err = controllers["node1"].AssertWorkReleased(ctx, unitID) + ctx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel2() + + err = controllers["node1"].AssertWorkReleased(ctx2, unitID) if err != nil { t.Fatal(err) } - ctx, _ = context.WithTimeout(context.Background(), 5*time.Second) - err = assertFilesReleased(ctx, nodes["node1"].GetDataDir(), "node1", unitID) + + ctx3, cancel3 := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel3() + err = assertFilesReleased(ctx3, nodes["node1"].GetDataDir(), "node1", unitID) if err != nil { t.Fatal(err) } @@ -162,8 +176,10 @@ func TestCancelThenReleaseRemoteWork(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkRunning(ctx, unitID) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err = controllers["node1"].AssertWorkRunning(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -171,8 +187,11 @@ func TestCancelThenReleaseRemoteWork(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkCancelled(ctx, unitID) + + ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel2() + + err = controllers["node1"].AssertWorkCancelled(ctx2, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -189,8 +208,10 @@ func TestCancelThenReleaseRemoteWork(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 
120*time.Second) - err = m.WaitForReady(ctx) + + ctx3, cancel3 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel3() + err = m.WaitForReady(ctx3) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -206,18 +227,26 @@ func TestCancelThenReleaseRemoteWork(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkReleased(ctx, unitID) + ctx4, cancel4 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel4() + + err = controllers["node1"].AssertWorkReleased(ctx4, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = assertFilesReleased(ctx, nodes["node1"].GetDataDir(), "node1", unitID) + + ctx5, cancel5 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel5() + + err = assertFilesReleased(ctx5, nodes["node1"].GetDataDir(), "node1", unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = assertFilesReleased(ctx, nodes["node3"].GetDataDir(), "node3", remoteUnitID) + + ctx6, cancel6 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel6() + + err = assertFilesReleased(ctx6, nodes["node3"].GetDataDir(), "node3", remoteUnitID) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -242,8 +271,10 @@ func TestWorkSubmitWhileRemoteNodeIsDown(t *testing.T) { t.Fatal(err, m.GetDataDir()) } - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkPending(ctx, unitID) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err = controllers["node1"].AssertWorkPending(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) @@ -256,15 +287,19 @@ func TestWorkSubmitWhileRemoteNodeIsDown(t *testing.T) { } // Wait for node3 to join the 
mesh again - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = m.WaitForReady(ctx) + ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel2() + + err = m.WaitForReady(ctx2) if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkSucceeded(ctx, unitID) + ctx3, cancel3 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel3() + + err = controllers["node1"].AssertWorkSucceeded(ctx3, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) @@ -293,13 +328,17 @@ func TestWorkStreamingResumesWhenRelayNodeRestarts(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkRunning(ctx, unitID) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err = controllers["node1"].AssertWorkRunning(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = assertStdoutFizeSize(ctx, nodes["node1"].GetDataDir(), "node1", unitID, 1) + ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel2() + + err = assertStdoutFizeSize(ctx2, nodes["node1"].GetDataDir(), "node1", unitID, 1) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -310,19 +349,25 @@ func TestWorkStreamingResumesWhenRelayNodeRestarts(t *testing.T) { nodes["node2"].Shutdown() nodes["node2"].Start() // Wait for node2 to join the mesh again - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = m.WaitForReady(ctx) + ctx3, cancel3 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel3() + + err = m.WaitForReady(ctx3) if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - 
err = controllers["node1"].AssertWorkSucceeded(ctx, unitID) + ctx4, cancel4 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel4() + + err = controllers["node1"].AssertWorkSucceeded(ctx4, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = assertStdoutFizeSize(ctx, nodes["node1"].GetDataDir(), "node1", unitID, 10) + ctx5, cancel5 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel5() + + err = assertStdoutFizeSize(ctx5, nodes["node1"].GetDataDir(), "node1", unitID, 10) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -349,8 +394,9 @@ func TestResultsOnRestartedNode(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkRunning(ctx, unitID) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + err = controllers["node1"].AssertWorkRunning(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -361,14 +407,18 @@ func TestResultsOnRestartedNode(t *testing.T) { t.Fatal(err, m.GetDataDir()) } // Wait for node3 to join the mesh again - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = m.WaitForReady(ctx) + ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel2() + + err = m.WaitForReady(ctx2) if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkSucceeded(ctx, unitID) + ctx3, cancel3 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel3() + + err = controllers["node1"].AssertWorkSucceeded(ctx3, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -399,8 +449,10 @@ func TestWorkSubmitAndReleaseToNonexistentNode(t *testing.T) { } // wait for 10 seconds, and check if the work is in 
pending state - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkPending(ctx, unitID) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err = controllers["node1"].AssertWorkPending(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -410,8 +462,11 @@ func TestWorkSubmitAndReleaseToNonexistentNode(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = m.WaitForReady(ctx) + + ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel2() + + err = m.WaitForReady(ctx2) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -429,8 +484,11 @@ func TestWorkSubmitAndReleaseToNonexistentNode(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkReleased(ctx, unitID) + + ctx3, cancel3 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel3() + + err = controllers["node1"].AssertWorkReleased(ctx3, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -454,8 +512,9 @@ func TestRuntimeParams(t *testing.T) { t.Fatal(err, m.GetDataDir()) } - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err, m.GetDataDir()) @@ -476,7 +535,7 @@ func TestRuntimeParams(t *testing.T) { t.Fatal(err, m.GetDataDir()) } - err = controllers["node1"].AssertWorkSucceeded(ctx, unitID) + err = controllers["node1"].AssertWorkSucceeded(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) @@ -506,8 +565,10 @@ func TestKubeRuntimeParams(t *testing.T) { m.Start(t.Name()) - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) 
- err := m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err := m.WaitForReady(ctx1) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -556,7 +617,7 @@ func TestKubeRuntimeParams(t *testing.T) { t.Fatal(err, m.GetDataDir()) } - err = controllers["node1"].AssertWorkSucceeded(ctx, unitID) + err = controllers["node1"].AssertWorkSucceeded(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) @@ -585,8 +646,10 @@ func TestRuntimeParamsNotAllowed(t *testing.T) { t.Fatal(err, m.GetDataDir()) } - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -627,8 +690,10 @@ func TestKubeContainerFailure(t *testing.T) { m.Start(t.Name()) - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err := m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err := m.WaitForReady(ctx1) if err != nil { t.Fatal(err, m.GetDataDir()) } @@ -644,8 +709,10 @@ func TestKubeContainerFailure(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkFailed(ctx, unitID) + ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel2() + + err = controllers["node1"].AssertWorkFailed(ctx2, unitID) if err != nil { t.Fatal("Expected work to fail but it succeeded") } @@ -720,8 +787,10 @@ func TestSignedWorkVerification(t *testing.T) { t.Fatal(err) } - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err = m.WaitForReady(ctx1) if err != nil { 
t.Fatal(err, m.GetDataDir()) } @@ -738,8 +807,10 @@ func TestSignedWorkVerification(t *testing.T) { if err != nil { t.Fatal(err, m.GetDataDir()) } - ctx, _ = context.WithTimeout(context.Background(), 120*time.Second) - err = controllers["node1"].AssertWorkSucceeded(ctx, unitID) + ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel2() + + err = controllers["node1"].AssertWorkSucceeded(ctx2, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } diff --git a/tests/functional/mesh/work_utils.go b/tests/functional/mesh/work_utils.go index 9acd5ec0d..ae1c9fe0a 100644 --- a/tests/functional/mesh/work_utils.go +++ b/tests/functional/mesh/work_utils.go @@ -21,8 +21,10 @@ func workSetup(workPluginName workPlugin, t *testing.T) (map[string]*ReceptorCon t.Fatal(err) } - ctx, _ := context.WithTimeout(context.Background(), 120*time.Second) - err = m.WaitForReady(ctx) + ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel1() + + err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err, m.DataDir) } From 1cdcf58a16d3114e3a2efe4794e6b5d2a77bdc81 Mon Sep 17 00:00:00 2001 From: Thom Carlin Date: Wed, 7 Feb 2024 11:30:49 -0500 Subject: [PATCH 3/3] Corrected comments per Aaron and Hao --- pkg/workceptor/kubernetes.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/workceptor/kubernetes.go b/pkg/workceptor/kubernetes.go index a2d838f87..aae1435d1 100644 --- a/pkg/workceptor/kubernetes.go +++ b/pkg/workceptor/kubernetes.go @@ -760,8 +760,8 @@ func (kw *kubeUnit) runWorkUsingLogger() { func shouldUseReconnect() bool { // Support for streaming from pod with timestamps using reconnect method is in all current versions // Can override the detection by setting the RECEPTOR_KUBE_SUPPORT_RECONNECT - // accepted values: "enabled", "disabled", "auto" with "disabled" being the default - // all invalid value will assume to be "disabled" + // accepted values: "enabled", "disabled", "auto". 
The default is "enabled" + // all invalid values will be assumed to be "disabled" env, ok := os.LookupEnv("RECEPTOR_KUBE_SUPPORT_RECONNECT") if ok {