Commit
chore: pkg imported more than once (#11775)
Signed-off-by: guoguangwu <guoguangwu@magic-shield.com>
testwill authored Sep 11, 2023
1 parent 7e62657 commit 17bb39b
Showing 3 changed files with 24 additions and 27 deletions.
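
All three files below fix the same pattern: a package imported twice, once under its default name and once under an alias. For context, here is a minimal standalone sketch of that pattern (a hypothetical example, not code from this repository); the same cleanup is applied in the hunks below to v1alpha1/wfv1, logrus/log, and v1/apiv1.

// duplicate_import.go — hypothetical sketch of the redundant pattern this
// commit removes. The same import path appears twice: once under its default
// package name ("v1") and once under an alias ("apiv1"). This compiles, since
// the two file-scope names are distinct, but one line is pure noise and
// linters commonly flag it.
package main

import (
	"fmt"

	"k8s.io/api/core/v1"       // default package name is v1
	apiv1 "k8s.io/api/core/v1" // same path again, aliased
)

func main() {
	// Both names refer to the identical package.
	var a v1.ConfigMap
	var b apiv1.ConfigMap
	fmt.Println(a.Name, b.Name)
}

The fix is simply to drop one of the two import lines (the un-aliased one here) and rewrite every v1.* reference to apiv1.*, which is exactly what the diffs below do.
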
13 changes: 6 additions & 7 deletions server/workflow/workflow_server.go
@@ -20,7 +20,6 @@ import (
workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive"
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow"
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned"
"github.com/argoproj/argo-workflows/v3/server/auth"
@@ -129,9 +128,9 @@ func (s *workflowServer) GetWorkflow(ctx context.Context, req *workflowpkg.Workf
return wf, nil
}

- func mergeWithArchivedWorkflows(liveWfs v1alpha1.WorkflowList, archivedWfs v1alpha1.WorkflowList, numWfsToKeep int) *v1alpha1.WorkflowList {
- var mergedWfs []v1alpha1.Workflow
- var uidToWfs = map[types.UID][]v1alpha1.Workflow{}
+ func mergeWithArchivedWorkflows(liveWfs wfv1.WorkflowList, archivedWfs wfv1.WorkflowList, numWfsToKeep int) *wfv1.WorkflowList {
+ var mergedWfs []wfv1.Workflow
+ var uidToWfs = map[types.UID][]wfv1.Workflow{}
for _, item := range liveWfs.Items {
uidToWfs[item.UID] = append(uidToWfs[item.UID], item)
}
@@ -152,17 +151,17 @@ func mergeWithArchivedWorkflows(liveWfs v1alpha1.WorkflowList, archivedWfs v1alp
}
}
}
- mergedWfsList := v1alpha1.WorkflowList{Items: mergedWfs, ListMeta: liveWfs.ListMeta}
+ mergedWfsList := wfv1.WorkflowList{Items: mergedWfs, ListMeta: liveWfs.ListMeta}
sort.Sort(mergedWfsList.Items)
numWfs := 0
- var finalWfs []v1alpha1.Workflow
+ var finalWfs []wfv1.Workflow
for _, item := range mergedWfsList.Items {
if numWfsToKeep == 0 || numWfs < numWfsToKeep {
finalWfs = append(finalWfs, item)
numWfs += 1
}
}
- return &v1alpha1.WorkflowList{Items: finalWfs, ListMeta: liveWfs.ListMeta}
+ return &wfv1.WorkflowList{Items: finalWfs, ListMeta: liveWfs.ListMeta}
}

func (s *workflowServer) ListWorkflows(ctx context.Context, req *workflowpkg.WorkflowListRequest) (*wfv1.WorkflowList, error) {
3 changes: 1 addition & 2 deletions util/file/fileutil.go
@@ -12,7 +12,6 @@ import (
"strings"

"github.com/klauspost/pgzip"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"k8s.io/utils/env"
)
@@ -153,7 +152,7 @@ func WalkManifests(root string, fn func(path string, data []byte) error) error {
case info.IsDir():
return nil // skip
default:
- logrus.Debugf("ignoring file with unknown extension: %s", path)
+ log.Debugf("ignoring file with unknown extension: %s", path)
return nil
}

35 changes: 17 additions & 18 deletions workflow/controller/operator_concurrency_test.go
@@ -11,7 +11,6 @@ import (

"github.com/stretchr/testify/assert"
apiv1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
@@ -165,7 +164,7 @@ func TestSemaphoreTmplLevel(t *testing.T) {
ctx := context.Background()
controller.syncManager = sync.NewLockManager(GetSyncLimitFunc(ctx, controller.kubeclientset), func(key string) {
}, workflowExistenceFunc)
- var cm v1.ConfigMap
+ var cm apiv1.ConfigMap
wfv1.MustUnmarshal([]byte(configMap), &cm)
_, err := controller.kubeclientset.CoreV1().ConfigMaps("default").Create(ctx, &cm, metav1.CreateOptions{})
assert.NoError(t, err)
@@ -204,7 +203,7 @@ func TestSemaphoreTmplLevel(t *testing.T) {
}

// Updating Pod state
- makePodsPhase(ctx, woc, v1.PodFailed)
+ makePodsPhase(ctx, woc, apiv1.PodFailed)

// Release the lock
woc = newWorkflowOperationCtx(woc.wf, controller)
@@ -226,7 +225,7 @@ func TestSemaphoreScriptTmplLevel(t *testing.T) {
ctx := context.Background()
controller.syncManager = sync.NewLockManager(GetSyncLimitFunc(ctx, controller.kubeclientset), func(key string) {
}, workflowExistenceFunc)
- var cm v1.ConfigMap
+ var cm apiv1.ConfigMap
wfv1.MustUnmarshal([]byte(configMap), &cm)
_, err := controller.kubeclientset.CoreV1().ConfigMaps("default").Create(ctx, &cm, metav1.CreateOptions{})
assert.NoError(t, err)
@@ -264,7 +263,7 @@ func TestSemaphoreScriptTmplLevel(t *testing.T) {
assert.Equal(t, wfv1.NodePending, node.Phase)
}
// Updating Pod state
- makePodsPhase(ctx, woc, v1.PodFailed)
+ makePodsPhase(ctx, woc, apiv1.PodFailed)

// Release the lock
woc = newWorkflowOperationCtx(woc.wf, controller)
@@ -286,7 +285,7 @@ func TestSemaphoreScriptConfigMapInDifferentNamespace(t *testing.T) {
ctx := context.Background()
controller.syncManager = sync.NewLockManager(GetSyncLimitFunc(ctx, controller.kubeclientset), func(key string) {
}, workflowExistenceFunc)
- var cm v1.ConfigMap
+ var cm apiv1.ConfigMap
wfv1.MustUnmarshal([]byte(configMap), &cm)
_, err := controller.kubeclientset.CoreV1().ConfigMaps("other").Create(ctx, &cm, metav1.CreateOptions{})
assert.NoError(t, err)
@@ -326,7 +325,7 @@ func TestSemaphoreScriptConfigMapInDifferentNamespace(t *testing.T) {
assert.Equal(t, wfv1.NodePending, node.Phase)
}
// Updating Pod state
- makePodsPhase(ctx, woc, v1.PodFailed)
+ makePodsPhase(ctx, woc, apiv1.PodFailed)

// Release the lock
woc = newWorkflowOperationCtx(woc.wf, controller)
@@ -348,7 +347,7 @@ func TestSemaphoreResourceTmplLevel(t *testing.T) {
ctx := context.Background()
controller.syncManager = sync.NewLockManager(GetSyncLimitFunc(ctx, controller.kubeclientset), func(key string) {
}, workflowExistenceFunc)
- var cm v1.ConfigMap
+ var cm apiv1.ConfigMap
wfv1.MustUnmarshal([]byte(configMap), &cm)
_, err := controller.kubeclientset.CoreV1().ConfigMaps("default").Create(ctx, &cm, metav1.CreateOptions{})
assert.NoError(t, err)
@@ -387,7 +386,7 @@ func TestSemaphoreResourceTmplLevel(t *testing.T) {
}

// Updating Pod state
- makePodsPhase(ctx, woc, v1.PodFailed)
+ makePodsPhase(ctx, woc, apiv1.PodFailed)

// Release the lock
woc = newWorkflowOperationCtx(woc.wf, controller)
@@ -478,7 +477,7 @@ func TestMutexInDAG(t *testing.T) {
}
}
assert.Equal(wfv1.WorkflowRunning, woc.wf.Status.Phase)
- makePodsPhase(ctx, woc, v1.PodSucceeded)
+ makePodsPhase(ctx, woc, apiv1.PodSucceeded)

woc1 := newWorkflowOperationCtx(woc.wf, controller)
woc1.operate(ctx)
@@ -550,7 +549,7 @@ func TestMutexInDAGWithInterpolation(t *testing.T) {
}
}
assert.Equal(wfv1.WorkflowRunning, woc.wf.Status.Phase)
- makePodsPhase(ctx, woc, v1.PodSucceeded)
+ makePodsPhase(ctx, woc, apiv1.PodSucceeded)

woc1 := newWorkflowOperationCtx(woc.wf, controller)
woc1.operate(ctx)
@@ -603,7 +602,7 @@ func TestSynchronizationWithRetry(t *testing.T) {
ctx := context.Background()
controller.syncManager = sync.NewLockManager(GetSyncLimitFunc(ctx, controller.kubeclientset), func(key string) {
}, workflowExistenceFunc)
- var cm v1.ConfigMap
+ var cm apiv1.ConfigMap
wfv1.MustUnmarshal([]byte(configMap), &cm)
_, err := controller.kubeclientset.CoreV1().ConfigMaps("default").Create(ctx, &cm, metav1.CreateOptions{})
assert.NoError(err)
@@ -620,7 +619,7 @@ func TestSynchronizationWithRetry(t *testing.T) {
}

// Updating Pod state
- makePodsPhase(ctx, woc, v1.PodSucceeded)
+ makePodsPhase(ctx, woc, apiv1.PodSucceeded)

// Release the lock from hello1
woc = newWorkflowOperationCtx(woc.wf, controller)
@@ -634,7 +633,7 @@ func TestSynchronizationWithRetry(t *testing.T) {
}
}
// Updating Pod state
- makePodsPhase(ctx, woc, v1.PodSucceeded)
+ makePodsPhase(ctx, woc, apiv1.PodSucceeded)

// Release the lock from hello2
woc = newWorkflowOperationCtx(woc.wf, controller)
@@ -811,7 +810,7 @@ func TestSynchronizationWithStep(t *testing.T) {
ctx := context.Background()
controller.syncManager = sync.NewLockManager(GetSyncLimitFunc(ctx, controller.kubeclientset), func(key string) {
}, workflowExistenceFunc)
- var cm v1.ConfigMap
+ var cm apiv1.ConfigMap
wfv1.MustUnmarshal([]byte(configMap), &cm)
_, err := controller.kubeclientset.CoreV1().ConfigMaps("default").Create(ctx, &cm, metav1.CreateOptions{})
assert.NoError(err)
@@ -888,7 +887,7 @@ func TestSynchronizationWithStepRetry(t *testing.T) {
ctx := context.Background()
controller.syncManager = sync.NewLockManager(GetSyncLimitFunc(ctx, controller.kubeclientset), func(key string) {
}, workflowExistenceFunc)
- var cm v1.ConfigMap
+ var cm apiv1.ConfigMap
wfv1.MustUnmarshal([]byte(configMap), &cm)
_, err := controller.kubeclientset.CoreV1().ConfigMaps("default").Create(ctx, &cm, metav1.CreateOptions{})
assert.NoError(err)
@@ -906,15 +905,15 @@ func TestSynchronizationWithStepRetry(t *testing.T) {
}
}
// Updating Pod state
- makePodsPhase(ctx, woc, v1.PodRunning)
+ makePodsPhase(ctx, woc, apiv1.PodRunning)

woc.operate(ctx)
for _, n := range woc.wf.Status.Nodes {
if n.Name == "[0].step1(0)" {
assert.Equal(n.Phase, wfv1.NodeRunning)
}
}
- makePodsPhase(ctx, woc, v1.PodFailed)
+ makePodsPhase(ctx, woc, apiv1.PodFailed)
woc.operate(ctx)
for _, n := range woc.wf.Status.Nodes {
if n.Name == "[0].step1(0)" {
