[Chore] Fix lint errors caused by casting int to int32 (#2368)
kevin85421 authored Sep 10, 2024
1 parent 22c2b45 commit fb58429
Showing 3 changed files with 11 additions and 17 deletions.
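The lint errors in question come from gosec's integer-overflow conversion check (G115), reported via golangci-lint for conversions such as int32(x) where x is an int variable. Below is a minimal, hedged sketch of the pattern and of the style of fix used in pod.go; defaultProbeTimeoutSeconds is a hypothetical stand-in for constants such as utils.DefaultLivenessProbeTimeoutSeconds.

package main

import "fmt"

// defaultProbeTimeoutSeconds is a hypothetical stand-in for the utils defaults.
// As an untyped constant, assigning it with := gives the variable type int.
const defaultProbeTimeoutSeconds = 30

func main() {
    // Before the fix: probeTimeout is an int, so converting the variable to
    // int32 later is a narrowing conversion that gosec flags.
    probeTimeout := defaultProbeTimeoutSeconds
    timeoutSeconds := int32(probeTimeout) // gosec: integer overflow conversion int -> int32

    // After the fix: convert at the declaration site. The constant conversion is
    // checked at compile time and the variable is already int32, so the later
    // assignment involves no variable narrowing for gosec to flag.
    fixedTimeout := int32(defaultProbeTimeoutSeconds)
    fixedTimeoutSeconds := fixedTimeout

    fmt.Println(timeoutSeconds, fixedTimeoutSeconds)
}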
12 changes: 6 additions & 6 deletions ray-operator/controllers/ray/common/pod.go
@@ -264,14 +264,14 @@ func initLivenessAndReadinessProbe(rayContainer *corev1.Container, rayNodeType r
     }

     if rayContainer.LivenessProbe == nil {
-        probeTimeout := utils.DefaultLivenessProbeTimeoutSeconds
+        probeTimeout := int32(utils.DefaultLivenessProbeTimeoutSeconds)
         if rayNodeType == rayv1.HeadNode {
-            probeTimeout = utils.DefaultHeadLivenessProbeTimeoutSeconds
+            probeTimeout = int32(utils.DefaultHeadLivenessProbeTimeoutSeconds)
         }

         rayContainer.LivenessProbe = &corev1.Probe{
             InitialDelaySeconds: utils.DefaultLivenessProbeInitialDelaySeconds,
-            TimeoutSeconds: int32(probeTimeout),
+            TimeoutSeconds: probeTimeout,
             PeriodSeconds: utils.DefaultLivenessProbePeriodSeconds,
             SuccessThreshold: utils.DefaultLivenessProbeSuccessThreshold,
             FailureThreshold: utils.DefaultLivenessProbeFailureThreshold,
@@ -280,13 +280,13 @@
     }

     if rayContainer.ReadinessProbe == nil {
-        probeTimeout := utils.DefaultReadinessProbeTimeoutSeconds
+        probeTimeout := int32(utils.DefaultReadinessProbeTimeoutSeconds)
         if rayNodeType == rayv1.HeadNode {
-            probeTimeout = utils.DefaultHeadReadinessProbeTimeoutSeconds
+            probeTimeout = int32(utils.DefaultHeadReadinessProbeTimeoutSeconds)
         }
         rayContainer.ReadinessProbe = &corev1.Probe{
             InitialDelaySeconds: utils.DefaultReadinessProbeInitialDelaySeconds,
-            TimeoutSeconds: int32(probeTimeout),
+            TimeoutSeconds: probeTimeout,
             PeriodSeconds: utils.DefaultReadinessProbePeriodSeconds,
             SuccessThreshold: utils.DefaultReadinessProbeSuccessThreshold,
             FailureThreshold: utils.DefaultReadinessProbeFailureThreshold,
14 changes: 4 additions & 10 deletions ray-operator/controllers/ray/raycluster_controller.go
@@ -4,7 +4,6 @@ import (
     "context"
     errstd "errors"
     "fmt"
-    "math"
     "os"
     "reflect"
     "runtime"
@@ -769,21 +768,16 @@ func (r *RayClusterReconciler) reconcilePods(ctx context.Context, instance *rayv
         if worker.NumOfHosts <= 0 {
             worker.NumOfHosts = 1
         }
-        numExpectedPods := workerReplicas * worker.NumOfHosts
-
-        if len(runningPods.Items) > math.MaxInt32 {
-            return errstd.New("len(runningPods.Items) exceeds math.MaxInt32")
-        }
-        diff := numExpectedPods - int32(len(runningPods.Items)) //nolint:gosec // Already checked in the previous line.
+        numExpectedPods := int(workerReplicas * worker.NumOfHosts)
+        diff := numExpectedPods - len(runningPods.Items)

         logger.Info("reconcilePods", "workerReplicas", workerReplicas, "NumOfHosts", worker.NumOfHosts, "runningPods", len(runningPods.Items), "diff", diff)

         if diff > 0 {
             // pods need to be added
             logger.Info("reconcilePods", "Number workers to add", diff, "Worker group", worker.GroupName)
             // create all workers of this group
-            var i int32
-            for i = 0; i < diff; i++ {
+            for i := 0; i < diff; i++ {
                 logger.Info("reconcilePods", "creating worker for group", worker.GroupName, fmt.Sprintf("index %d", i), fmt.Sprintf("in total %d", diff))
                 if err := r.createWorkerPod(ctx, *instance, *worker.DeepCopy()); err != nil {
                     return errstd.Join(utils.ErrFailedCreateWorkerPod, err)
@@ -814,7 +808,7 @@ func (r *RayClusterReconciler) reconcilePods(ctx context.Context, instance *rayv
             // diff < 0 means that we need to delete some Pods to meet the desired number of replicas.
             randomlyRemovedWorkers := -diff
             logger.Info("reconcilePods", "Number workers to delete randomly", randomlyRemovedWorkers, "Worker group", worker.GroupName)
-            for i := 0; i < int(randomlyRemovedWorkers); i++ {
+            for i := 0; i < randomlyRemovedWorkers; i++ {
                 randomPodToDelete := runningPods.Items[i]
                 logger.Info("Randomly deleting Pod", "progress", fmt.Sprintf("%d / %d", i+1, randomlyRemovedWorkers), "with name", randomPodToDelete.Name)
                 if err := r.Delete(ctx, &randomPodToDelete); err != nil {
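The raycluster_controller.go change above takes the opposite route: rather than guarding len(runningPods.Items) against math.MaxInt32 and narrowing it to int32, the expected pod count is widened to int, so the subtraction and both loops stay in int and no narrowing conversion is left for gosec to flag. A minimal sketch, assuming int32-typed replica and host counts as in the CRD fields:

package main

import "fmt"

func expectedPodDiff(workerReplicas, numOfHosts int32, runningPods []string) int {
    // Widening int32 -> int never loses information, so there is nothing for
    // gosec to flag and no math.MaxInt32 guard is needed on len(runningPods).
    numExpectedPods := int(workerReplicas * numOfHosts)
    return numExpectedPods - len(runningPods)
}

func main() {
    running := make([]string, 4)                // stand-in for runningPods.Items
    fmt.Println(expectedPodDiff(3, 2, running)) // prints 2: two more pods to create
}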
2 changes: 1 addition & 1 deletion ray-operator/controllers/ray/rayservice_controller.go
@@ -251,7 +251,7 @@ func (r *RayServiceReconciler) calculateStatus(ctx context.Context, rayServiceIn
     if numServeEndpoints > math.MaxInt32 {
         return errstd.New("numServeEndpoints exceeds math.MaxInt32")
     }
-    rayServiceInstance.Status.NumServeEndpoints = int32(numServeEndpoints) //nolint:gosec // Already checked in the previous line.
+    rayServiceInstance.Status.NumServeEndpoints = int32(numServeEndpoints) //nolint:gosec // This is a false positive from gosec. See https://github.com/securego/gosec/issues/1212 for more details.
     return nil
 }

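In rayservice_controller.go only the //nolint:gosec justification changes: the int32(numServeEndpoints) conversion remains guarded by the math.MaxInt32 check directly above it, and the suppressed warning is attributed to the upstream gosec false positive tracked in securego/gosec#1212 rather than to the bounds check alone.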
