[Style] Fix golangci-lint rule: unparam (#2195)
MortalHappiness committed Jun 15, 2024
1 parent 40a946a commit 160fdee
Showing 8 changed files with 19 additions and 19 deletions.
ray-operator/.golangci.yml (2 changes: 1 addition & 1 deletion)
@@ -73,7 +73,7 @@ linters:
     - staticcheck
     - typecheck
     - unconvert
-    # - unparam
+    - unparam
     - unused
     - wastedassign
   disable-all: true
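Enabling unparam turns on golangci-lint's check for function parameters and results that every caller treats identically: a parameter that always receives the same value, a parameter that is never used, or a result that is always nil. The snippet below is a minimal, hypothetical illustration (not from this repository) of the kind of code the linter reports:

package example

import "fmt"

// unparam reports that prefix always receives "Hello", because every
// caller passes the same constant; the parameter could be inlined.
func greet(prefix, name string) string {
	return fmt.Sprintf("%s, %s!", prefix, name)
}

func main() {
	fmt.Println(greet("Hello", "Ray"))
	fmt.Println(greet("Hello", "KubeRay"))
}

With the rule enabled, findings like this either get refactored away or explicitly suppressed, which is exactly what the remaining files in this commit do.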
ray-operator/controllers/ray/common/pod_test.go (11 changes: 6 additions & 5 deletions)
@@ -787,7 +787,8 @@ func TestDefaultWorkerPodTemplateWithName(t *testing.T) {
 	assert.Equal(t, worker, expectedWorker)
 }
 
-func containerPortExists(ports []corev1.ContainerPort, name string, containerPort int32) error {
+func containerPortExists(ports []corev1.ContainerPort, containerPort int32) error {
+	name := utils.MetricsPortName
 	for _, port := range ports {
 		if port.Name == name {
 			if port.ContainerPort != containerPort {
@@ -808,7 +809,7 @@ func TestDefaultHeadPodTemplateWithConfigurablePorts(t *testing.T) {
 	podTemplateSpec := DefaultHeadPodTemplate(ctx, *cluster, cluster.Spec.HeadGroupSpec, podName, "6379")
 	// DefaultHeadPodTemplate will add the default metrics port if user doesn't specify it.
 	// Verify the default metrics port exists.
-	if err := containerPortExists(podTemplateSpec.Spec.Containers[0].Ports, utils.MetricsPortName, int32(utils.DefaultMetricsPort)); err != nil {
+	if err := containerPortExists(podTemplateSpec.Spec.Containers[0].Ports, int32(utils.DefaultMetricsPort)); err != nil {
 		t.Fatal(err)
 	}
 	customMetricsPort := int32(utils.DefaultMetricsPort) + 1
@@ -819,7 +820,7 @@ func TestDefaultHeadPodTemplateWithConfigurablePorts(t *testing.T) {
 	cluster.Spec.HeadGroupSpec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{metricsPort}
 	podTemplateSpec = DefaultHeadPodTemplate(ctx, *cluster, cluster.Spec.HeadGroupSpec, podName, "6379")
 	// Verify the custom metrics port exists.
-	if err := containerPortExists(podTemplateSpec.Spec.Containers[0].Ports, utils.MetricsPortName, customMetricsPort); err != nil {
+	if err := containerPortExists(podTemplateSpec.Spec.Containers[0].Ports, customMetricsPort); err != nil {
 		t.Fatal(err)
 	}
 }
@@ -835,7 +836,7 @@ func TestDefaultWorkerPodTemplateWithConfigurablePorts(t *testing.T) {
 	podTemplateSpec := DefaultWorkerPodTemplate(ctx, *cluster, worker, podName, fqdnRayIP, "6379")
 	// DefaultWorkerPodTemplate will add the default metrics port if user doesn't specify it.
 	// Verify the default metrics port exists.
-	if err := containerPortExists(podTemplateSpec.Spec.Containers[0].Ports, utils.MetricsPortName, int32(utils.DefaultMetricsPort)); err != nil {
+	if err := containerPortExists(podTemplateSpec.Spec.Containers[0].Ports, int32(utils.DefaultMetricsPort)); err != nil {
 		t.Fatal(err)
 	}
 	customMetricsPort := int32(utils.DefaultMetricsPort) + 1
@@ -846,7 +847,7 @@ func TestDefaultWorkerPodTemplateWithConfigurablePorts(t *testing.T) {
 	cluster.Spec.WorkerGroupSpecs[0].Template.Spec.Containers[0].Ports = []corev1.ContainerPort{metricsPort}
 	podTemplateSpec = DefaultWorkerPodTemplate(ctx, *cluster, worker, podName, fqdnRayIP, "6379")
 	// Verify the custom metrics port exists.
-	if err := containerPortExists(podTemplateSpec.Spec.Containers[0].Ports, utils.MetricsPortName, customMetricsPort); err != nil {
+	if err := containerPortExists(podTemplateSpec.Spec.Containers[0].Ports, customMetricsPort); err != nil {
 		t.Fatal(err)
 	}
 }
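The containerPortExists change shows the usual fix for an "always receives the same value" report: every test passed utils.MetricsPortName, so the parameter is dropped and the value becomes a local inside the helper. A hedged, generic sketch of that refactor (names are illustrative, not from the repository):

package example

const defaultPortName = "metrics"

// Before (flagged): name always received defaultPortName at every call site.
//
//	func portExists(ports map[string]int32, name string, want int32) bool
//
// After: the constant is inlined, and callers pass one argument fewer.
func portExists(ports map[string]int32, want int32) bool {
	name := defaultPortName
	return ports[name] == want
}

The service.go change in the next file applies the same idea to a return value instead of a parameter.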
ray-operator/controllers/ray/common/service.go (8 changes: 4 additions & 4 deletions)
@@ -372,9 +372,9 @@ func setLabelsforUserProvidedService(service *corev1.Service, labels map[string]
 
 // getServicePorts will either user passing ports or default ports to create service.
 func getServicePorts(cluster rayv1.RayCluster) map[string]int32 {
-	ports, err := getPortsFromCluster(cluster)
+	ports := getPortsFromCluster(cluster)
 	// Assign default ports
-	if err != nil || len(ports) == 0 {
+	if len(ports) == 0 {
 		ports = getDefaultPorts()
 	}
 
@@ -389,7 +389,7 @@ func getServicePorts(cluster rayv1.RayCluster) map[string]int32 {
 // getPortsFromCluster get the ports from head container and directly map them in service
 // It's user's responsibility to maintain rayStartParam ports and container ports mapping
 // TODO: Consider to infer ports from rayStartParams (source of truth) in the future.
-func getPortsFromCluster(cluster rayv1.RayCluster) (map[string]int32, error) {
+func getPortsFromCluster(cluster rayv1.RayCluster) map[string]int32 {
 	svcPorts := map[string]int32{}
 
 	cPorts := cluster.Spec.HeadGroupSpec.Template.Spec.Containers[utils.RayContainerIndex].Ports
@@ -400,7 +400,7 @@ func getPortsFromCluster(cluster rayv1.RayCluster) (map[string]int32, error) {
 		svcPorts[port.Name] = port.ContainerPort
 	}
 
-	return svcPorts, nil
+	return svcPorts
 }
 
 func getDefaultPorts() map[string]int32 {
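unparam also reports results that never vary, which is why getPortsFromCluster loses its always-nil error: the caller's err check could never trigger. A minimal, hypothetical sketch of that pattern and its fix (not the repository's code):

package example

// Before (flagged): the error result was always nil, so callers carried
// dead error handling such as `if err != nil || len(m) == 0 { ... }`.
//
//	func portsOf(ps []int32) (map[int32]bool, error) { ...; return m, nil }
//
// After: the impossible error is dropped and callers simplify to
// `m := portsOf(ps)` followed by a plain emptiness check.
func portsOf(ps []int32) map[int32]bool {
	m := make(map[int32]bool, len(ps))
	for _, p := range ps {
		m[p] = true
	}
	return m
}

TestGetPortsFromCluster in the next file drops its assert.Nil(t, err) for the same reason: there is no longer an error to assert on.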
ray-operator/controllers/ray/common/service_test.go (3 changes: 1 addition & 2 deletions)
@@ -207,8 +207,7 @@ func TestBuildServiceForHeadPodWithAnnotations(t *testing.T) {
 }
 
 func TestGetPortsFromCluster(t *testing.T) {
-	svcPorts, err := getPortsFromCluster(*instanceWithWrongSvc)
-	assert.Nil(t, err)
+	svcPorts := getPortsFromCluster(*instanceWithWrongSvc)
 
 	// getPortsFromCluster creates service ports based on the container ports.
 	// It will assign a generated service port name if the container port name
ray-operator/controllers/ray/raycluster_controller_test.go (4 changes: 2 additions & 2 deletions)
@@ -390,7 +390,7 @@ var _ = Context("Inside the default namespace", func() {
 
 			// Ray Autoscaler should clean up WorkersToDelete after scaling process has finished.
 			// Call cleanUpWorkersToDelete to simulate the behavior of the Ray Autoscaler.
-			cleanUpWorkersToDelete(ctx, rayCluster, 0)
+			cleanUpWorkersToDelete(ctx, rayCluster)
 		})
 
 		It("Simulate Ray Autoscaler scales up", func() {
@@ -629,7 +629,7 @@ var _ = Context("Inside the default namespace", func() {
 
 			// Ray Autoscaler should clean up WorkersToDelete after scaling process has finished.
 			// Call cleanUpWorkersToDelete to simulate the behavior of the Ray Autoscaler.
-			cleanUpWorkersToDelete(ctx, rayCluster, 0)
+			cleanUpWorkersToDelete(ctx, rayCluster)
 		})
 
 		It("Simulate Ray Autoscaler scales up", func() {
ray-operator/controllers/ray/rayjob_controller_test.go (2 changes: 1 addition & 1 deletion)
@@ -266,7 +266,7 @@ var _ = Context("RayJob in K8sJobMode", func() {
 
 		It("RayJobs's JobDeploymentStatus transitions from Running to Complete.", func() {
 			// Update fake dashboard client to return job info with "Succeeded" status.
-			getJobInfo := func(context.Context, string) (*utils.RayJobInfo, error) {
+			getJobInfo := func(context.Context, string) (*utils.RayJobInfo, error) { //nolint:unparam
 				return &utils.RayJobInfo{JobStatus: rayv1.JobStatusSucceeded}, nil
 			}
 			fakeRayDashboardClient.GetJobInfoMock.Store(&getJobInfo)
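Not every finding can be fixed by changing a signature: the fake getJobInfo closure must keep the func(context.Context, string) (*utils.RayJobInfo, error) shape that the dashboard-client mock stores, even though the test stub never returns an error. In that case the finding is suppressed with a //nolint directive scoped to the single linter. A generic, hypothetical example of the same pattern:

package example

// fetchStatus keeps an error in its signature so it can be stored where a
// func(string) (string, error) is expected, even though this stub always
// succeeds; the directive silences only the unparam finding on this line.
func fetchStatus(id string) (string, error) { //nolint:unparam
	return "SUCCEEDED:" + id, nil
}

Scoping the directive to unparam (rather than a bare //nolint) keeps the other enabled linters active on that line.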
ray-operator/controllers/ray/rayservice_controller_test.go (4 changes: 2 additions & 2 deletions)
@@ -382,7 +382,7 @@ var _ = Context("Inside the default namespace", func() {
 				getResourceFunc(ctx, client.ObjectKey{Name: myRayService.Status.ActiveServiceStatus.RayClusterName, Namespace: "default"}, myRayCluster),
 				time.Second*3, time.Millisecond*500).Should(BeNil(), "My myRayCluster = %v", myRayCluster.Name)
 
-			cleanUpWorkersToDelete(ctx, myRayCluster, 0)
+			cleanUpWorkersToDelete(ctx, myRayCluster)
 		})
 
 		It("Autoscaler updates the pending RayCluster and should not switch to a new RayCluster", func() {
@@ -436,7 +436,7 @@ var _ = Context("Inside the default namespace", func() {
 				getRayClusterNameFunc(ctx, myRayService),
 				time.Second*15, time.Millisecond*500).Should(Equal(initialPendingClusterName), "New active RayCluster name = %v", myRayService.Status.ActiveServiceStatus.RayClusterName)
 
-			cleanUpWorkersToDelete(ctx, myRayCluster, 0)
+			cleanUpWorkersToDelete(ctx, myRayCluster)
 		})
 		It("should update the active RayCluster in place when WorkerGroupSpecs are modified by the user in RayServiceSpec", func() {
 			initialClusterName, _ := getRayClusterNameFunc(ctx, myRayService)()
ray-operator/controllers/ray/suite_helpers_test.go (4 changes: 2 additions & 2 deletions)
@@ -71,14 +71,14 @@ func isAllPodsRunningByFilters(ctx context.Context, podlist corev1.PodList, opt
 	return true
 }
 
-func cleanUpWorkersToDelete(ctx context.Context, rayCluster *rayv1.RayCluster, workerGroupIndex int) {
+func cleanUpWorkersToDelete(ctx context.Context, rayCluster *rayv1.RayCluster) {
 	// Updating WorkersToDelete is the responsibility of the Ray Autoscaler. In this function,
 	// we simulate the behavior of the Ray Autoscaler after the scaling process has finished.
 	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
 		gomega.Eventually(
 			getResourceFunc(ctx, client.ObjectKey{Name: rayCluster.Name, Namespace: "default"}, rayCluster),
 			time.Second*9, time.Millisecond*500).Should(gomega.BeNil(), "raycluster = %v", rayCluster)
-		rayCluster.Spec.WorkerGroupSpecs[workerGroupIndex].ScaleStrategy.WorkersToDelete = []string{}
+		rayCluster.Spec.WorkerGroupSpecs[0].ScaleStrategy.WorkersToDelete = []string{}
 		return k8sClient.Update(ctx, rayCluster)
 	})
 	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to clean up WorkersToDelete")
