Commit

[Style] Fix golangci-lint rule: govet (#2144)
MortalHappiness committed Jun 24, 2024
1 parent 828afba commit a43217b
Showing 29 changed files with 560 additions and 562 deletions.
76 changes: 38 additions & 38 deletions docs/reference/api.md

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion ray-operator/.golangci.yml
@@ -67,7 +67,7 @@ linters:
- goimports
- gosec
- gosimple
# - govet
- govet
- ineffassign
# - lll
- makezero
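
Re-enabling govet here is what drives every struct-field reordering in the Go files below: govet's optional fieldalignment analyzer (presumably what this repository's govet settings turn on — those settings are not visible in this hunk, so that is an assumption) flags structs whose fields could be reordered to waste less padding. The sketch below is not part of the commit and uses hypothetical type names; it only illustrates the rule the analyzer applies — fields with the largest alignment first, smaller ones packed at the end.

```go
// Illustrative sketch only (not from the KubeRay repository). It shows the
// padding problem a fieldalignment-style check reports and how reordering
// fixes it.
package main

import (
	"fmt"
	"unsafe"
)

// Poor ordering: the 1-byte bool is followed by an 8-byte pointer, so the
// compiler inserts 7 bytes of padding; the trailing int32 leaves 4 more.
type unaligned struct {
	Enabled bool    // 1 byte + 7 bytes padding
	Options *string // 8 bytes
	Count   int32   // 4 bytes + 4 bytes trailing padding
}

// Largest-alignment fields first: the small fields now share one trailing word.
type aligned struct {
	Options *string // 8 bytes
	Count   int32   // 4 bytes
	Enabled bool    // 1 byte + 3 bytes trailing padding
}

func main() {
	fmt.Println(unsafe.Sizeof(unaligned{})) // 24 on 64-bit platforms
	fmt.Println(unsafe.Sizeof(aligned{}))   // 16 on 64-bit platforms
}
```
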
22 changes: 11 additions & 11 deletions ray-operator/apis/config/v1alpha1/configuration_types.go
@@ -28,9 +28,6 @@ type Configuration struct {
// resources live. Defaults to the pod namesapce if not set.
LeaderElectionNamespace string `json:"leaderElectionNamespace,omitempty"`

// ReconcileConcurrency is the max concurrency for each reconciler.
ReconcileConcurrency int `json:"reconcileConcurrency,omitempty"`

// WatchNamespace specifies a list of namespaces to watch for custom resources, separated by commas.
// If empty, all namespaces will be watched.
WatchNamespace string `json:"watchNamespace,omitempty"`
@@ -46,6 +43,17 @@ type Configuration struct {
// Defaults to `json` if empty.
LogStdoutEncoder string `json:"logStdoutEncoder,omitempty"`

// HeadSidecarContainers includes specification for a sidecar container
// to inject into every Head pod.
HeadSidecarContainers []corev1.Container `json:"headSidecarContainers,omitempty"`

// WorkerSidecarContainers includes specification for a sidecar container
// to inject into every Worker pod.
WorkerSidecarContainers []corev1.Container `json:"workerSidecarContainers,omitempty"`

// ReconcileConcurrency is the max concurrency for each reconciler.
ReconcileConcurrency int `json:"reconcileConcurrency,omitempty"`

// EnableBatchScheduler enables the batch scheduler. Currently this is supported
// by Volcano to support gang scheduling.
EnableBatchScheduler bool `json:"enableBatchScheduler,omitempty"`
@@ -55,14 +63,6 @@ type Configuration struct {
// ingress traffic to the Ray cluster from other pods or Kuberay is running in a network without
// connectivity to Pods.
UseKubernetesProxy bool `json:"useKubernetesProxy,omitempty"`

// HeadSidecarContainers includes specification for a sidecar container
// to inject into every Head pod.
HeadSidecarContainers []corev1.Container `json:"headSidecarContainers,omitempty"`

// WorkerSidecarContainers includes specification for a sidecar container
// to inject into every Worker pod.
WorkerSidecarContainers []corev1.Container `json:"workerSidecarContainers,omitempty"`
}

func (config Configuration) GetDashboardClient(mgr manager.Manager) func() utils.RayDashboardClientInterface {
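
In Configuration, the sidecar-container slices move ahead of the int and bool fields. Reordering Go struct fields does not change what the serialized config accepts or means — encoding/json (and the YAML wrappers built on it) match keys by field name, not by position — so only the declaration order, and therefore the marshaled key order, changes. A minimal round-trip check, using a hypothetical stand-in type rather than the real Configuration:

```go
// Minimal sketch (not part of the commit): field order does not affect how a
// config decodes, because JSON keys are matched to fields by their struct tags.
package main

import (
	"encoding/json"
	"fmt"
)

// miniConfig is a hypothetical stand-in for a few Configuration fields.
type miniConfig struct {
	WatchNamespace       string `json:"watchNamespace,omitempty"`
	ReconcileConcurrency int    `json:"reconcileConcurrency,omitempty"`
	EnableBatchScheduler bool   `json:"enableBatchScheduler,omitempty"`
}

func main() {
	in := []byte(`{"enableBatchScheduler": true, "reconcileConcurrency": 4, "watchNamespace": "ray-system"}`)

	var c miniConfig
	if err := json.Unmarshal(in, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // {WatchNamespace:ray-system ReconcileConcurrency:4 EnableBatchScheduler:true}

	out, _ := json.Marshal(c)
	fmt.Println(string(out)) // keys follow declaration order; values are unchanged
}
```
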
58 changes: 29 additions & 29 deletions ray-operator/apis/ray/v1/raycluster_types.go
@@ -11,22 +11,22 @@ import (

// RayClusterSpec defines the desired state of RayCluster
type RayClusterSpec struct {
// Suspend indicates whether a RayCluster should be suspended.
// A suspended RayCluster will have head pods and worker pods deleted.
Suspend *bool `json:"suspend,omitempty"`
// AutoscalerOptions specifies optional configuration for the Ray autoscaler.
AutoscalerOptions *AutoscalerOptions `json:"autoscalerOptions,omitempty"`
HeadServiceAnnotations map[string]string `json:"headServiceAnnotations,omitempty"`
// EnableInTreeAutoscaling indicates whether operator should create in tree autoscaling configs
EnableInTreeAutoscaling *bool `json:"enableInTreeAutoscaling,omitempty"`
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make" to regenerate code after modifying this file
// HeadGroupSpecs are the spec for the head pod
HeadGroupSpec HeadGroupSpec `json:"headGroupSpec"`
// WorkerGroupSpecs are the specs for the worker pods
WorkerGroupSpecs []WorkerGroupSpec `json:"workerGroupSpecs,omitempty"`
// RayVersion is used to determine the command for the Kubernetes Job managed by RayJob
RayVersion string `json:"rayVersion,omitempty"`
// EnableInTreeAutoscaling indicates whether operator should create in tree autoscaling configs
EnableInTreeAutoscaling *bool `json:"enableInTreeAutoscaling,omitempty"`
// AutoscalerOptions specifies optional configuration for the Ray autoscaler.
AutoscalerOptions *AutoscalerOptions `json:"autoscalerOptions,omitempty"`
HeadServiceAnnotations map[string]string `json:"headServiceAnnotations,omitempty"`
// Suspend indicates whether a RayCluster should be suspended.
// A suspended RayCluster will have head pods and worker pods deleted.
Suspend *bool `json:"suspend,omitempty"`
// WorkerGroupSpecs are the specs for the worker pods
WorkerGroupSpecs []WorkerGroupSpec `json:"workerGroupSpecs,omitempty"`
}

// HeadGroupSpec are the spec for the head pod
@@ -56,15 +56,15 @@ type WorkerGroupSpec struct {
// MaxReplicas denotes the maximum number of desired Pods for this worker group, and the default value is maxInt32.
// +kubebuilder:default:=2147483647
MaxReplicas *int32 `json:"maxReplicas"`
// NumOfHosts denotes the number of hosts to create per replica. The default value is 1.
// +kubebuilder:default:=1
NumOfHosts int32 `json:"numOfHosts,omitempty"`
// RayStartParams are the params of the start command: address, object-store-memory, ...
RayStartParams map[string]string `json:"rayStartParams"`
// Template is a pod template for the worker
Template corev1.PodTemplateSpec `json:"template"`
// ScaleStrategy defines which pods to remove
ScaleStrategy ScaleStrategy `json:"scaleStrategy,omitempty"`
// NumOfHosts denotes the number of hosts to create per replica. The default value is 1.
// +kubebuilder:default:=1
NumOfHosts int32 `json:"numOfHosts,omitempty"`
}

// ScaleStrategy to remove workers
@@ -82,12 +82,6 @@ type AutoscalerOptions struct {
Image *string `json:"image,omitempty"`
// ImagePullPolicy optionally overrides the autoscaler container's image pull policy. This override is for provided for autoscaler testing and development.
ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
// Optional list of environment variables to set in the autoscaler container.
Env []corev1.EnvVar `json:"env,omitempty"`
// Optional list of sources to populate environment variables in the autoscaler container.
EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"`
// Optional list of volumeMounts. This is needed for enabling TLS for the autoscaler container.
VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
// SecurityContext defines the security options the container should be run with.
// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
@@ -101,6 +95,12 @@ type AutoscalerOptions struct {
// Aggressive: An alias for Default; upscaling is not rate-limited.
// It is not read by the KubeRay operator but by the Ray autoscaler.
UpscalingMode *UpscalingMode `json:"upscalingMode,omitempty"`
// Optional list of environment variables to set in the autoscaler container.
Env []corev1.EnvVar `json:"env,omitempty"`
// Optional list of sources to populate environment variables in the autoscaler container.
EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"`
// Optional list of volumeMounts. This is needed for enabling TLS for the autoscaler container.
VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
}

// +kubebuilder:validation:Enum=Default;Aggressive;Conservative
@@ -121,16 +121,6 @@ type RayClusterStatus struct {
// Important: Run "make" to regenerate code after modifying this file
// Status reflects the status of the cluster
State ClusterState `json:"state,omitempty"`
// ReadyWorkerReplicas indicates how many worker replicas are ready in the cluster
ReadyWorkerReplicas int32 `json:"readyWorkerReplicas,omitempty"`
// AvailableWorkerReplicas indicates how many replicas are available in the cluster
AvailableWorkerReplicas int32 `json:"availableWorkerReplicas,omitempty"`
// DesiredWorkerReplicas indicates overall desired replicas claimed by the user at the cluster level.
DesiredWorkerReplicas int32 `json:"desiredWorkerReplicas,omitempty"`
// MinWorkerReplicas indicates sum of minimum replicas of each node group.
MinWorkerReplicas int32 `json:"minWorkerReplicas,omitempty"`
// MaxWorkerReplicas indicates sum of maximum replicas of each node group.
MaxWorkerReplicas int32 `json:"maxWorkerReplicas,omitempty"`
// DesiredCPU indicates total desired CPUs for the cluster
DesiredCPU resource.Quantity `json:"desiredCPU,omitempty"`
// DesiredMemory indicates total desired memory for the cluster
@@ -150,6 +140,16 @@ type RayClusterStatus struct {
Head HeadInfo `json:"head,omitempty"`
// Reason provides more information about current State
Reason string `json:"reason,omitempty"`
// ReadyWorkerReplicas indicates how many worker replicas are ready in the cluster
ReadyWorkerReplicas int32 `json:"readyWorkerReplicas,omitempty"`
// AvailableWorkerReplicas indicates how many replicas are available in the cluster
AvailableWorkerReplicas int32 `json:"availableWorkerReplicas,omitempty"`
// DesiredWorkerReplicas indicates overall desired replicas claimed by the user at the cluster level.
DesiredWorkerReplicas int32 `json:"desiredWorkerReplicas,omitempty"`
// MinWorkerReplicas indicates sum of minimum replicas of each node group.
MinWorkerReplicas int32 `json:"minWorkerReplicas,omitempty"`
// MaxWorkerReplicas indicates sum of maximum replicas of each node group.
MaxWorkerReplicas int32 `json:"maxWorkerReplicas,omitempty"`
// observedGeneration is the most recent generation observed for this RayCluster. It corresponds to the
// RayCluster's generation, which is updated on mutation by the API Server.
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
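
RayClusterSpec and RayClusterStatus receive the largest reorderings. To see where the padding actually sits in a layout like this, reflect can print each field's offset; the type below is a hypothetical miniature, not the real status struct.

```go
// Sketch (not from the commit): print field offsets so the padding that a
// fieldalignment check reports becomes visible. miniStatus only mimics the
// "small field between larger fields" pattern.
package main

import (
	"fmt"
	"reflect"
)

type miniStatus struct {
	State               string            // 16-byte string header
	ReadyWorkerReplicas int32             // 4 bytes, then 4 bytes of padding
	Annotations         map[string]string // 8-byte map header
}

func main() {
	t := reflect.TypeOf(miniStatus{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		fmt.Printf("%-20s offset=%-3d size=%d\n", f.Name, f.Offset, f.Type.Size())
	}
	fmt.Println("total size:", t.Size()) // 32 on 64-bit platforms
}
```
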
52 changes: 26 additions & 26 deletions ray-operator/apis/ray/v1/rayjob_types.go
@@ -66,51 +66,51 @@ type SubmitterConfig struct {

// RayJobSpec defines the desired state of RayJob
type RayJobSpec struct {
// ActiveDeadlineSeconds is the duration in seconds that the RayJob may be active before
// KubeRay actively tries to terminate the RayJob; value must be positive integer.
ActiveDeadlineSeconds *int32 `json:"activeDeadlineSeconds,omitempty"`
// RayClusterSpec is the cluster template to run the job
RayClusterSpec *RayClusterSpec `json:"rayClusterSpec,omitempty"`
// SubmitterPodTemplate is the template for the pod that will run `ray job submit`.
SubmitterPodTemplate *corev1.PodTemplateSpec `json:"submitterPodTemplate,omitempty"`
// Metadata is data to store along with this job.
Metadata map[string]string `json:"metadata,omitempty"`
// clusterSelector is used to select running rayclusters by labels
ClusterSelector map[string]string `json:"clusterSelector,omitempty"`
// Configurations of submitter k8s job.
SubmitterConfig *SubmitterConfig `json:"submitterConfig,omitempty"`
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make" to regenerate code after modifying this file
Entrypoint string `json:"entrypoint"`
// Metadata is data to store along with this job.
Metadata map[string]string `json:"metadata,omitempty"`
// RuntimeEnvYAML represents the runtime environment configuration
// provided as a multi-line YAML string.
RuntimeEnvYAML string `json:"runtimeEnvYAML,omitempty"`
// If jobId is not set, a new jobId will be auto-generated.
JobId string `json:"jobId,omitempty"`
// ShutdownAfterJobFinishes will determine whether to delete the ray cluster once rayJob succeed or failed.
ShutdownAfterJobFinishes bool `json:"shutdownAfterJobFinishes,omitempty"`
// TTLSecondsAfterFinished is the TTL to clean up RayCluster.
// It's only working when ShutdownAfterJobFinishes set to true.
// +kubebuilder:default:=0
TTLSecondsAfterFinished int32 `json:"ttlSecondsAfterFinished,omitempty"`
// ActiveDeadlineSeconds is the duration in seconds that the RayJob may be active before
// KubeRay actively tries to terminate the RayJob; value must be positive integer.
ActiveDeadlineSeconds *int32 `json:"activeDeadlineSeconds,omitempty"`
// RayClusterSpec is the cluster template to run the job
RayClusterSpec *RayClusterSpec `json:"rayClusterSpec,omitempty"`
// clusterSelector is used to select running rayclusters by labels
ClusterSelector map[string]string `json:"clusterSelector,omitempty"`
// SubmissionMode specifies how RayJob submits the Ray job to the RayCluster.
// In "K8sJobMode", the KubeRay operator creates a submitter Kubernetes Job to submit the Ray job.
// In "HTTPMode", the KubeRay operator sends a request to the RayCluster to create a Ray job.
// +kubebuilder:default:=K8sJobMode
SubmissionMode JobSubmissionMode `json:"submissionMode,omitempty"`
// EntrypointResources specifies the custom resources and quantities to reserve for the
// entrypoint command.
EntrypointResources string `json:"entrypointResources,omitempty"`
// EntrypointNumCpus specifies the number of cpus to reserve for the entrypoint command.
EntrypointNumCpus float32 `json:"entrypointNumCpus,omitempty"`
// EntrypointNumGpus specifies the number of gpus to reserve for the entrypoint command.
EntrypointNumGpus float32 `json:"entrypointNumGpus,omitempty"`
// TTLSecondsAfterFinished is the TTL to clean up RayCluster.
// It's only working when ShutdownAfterJobFinishes set to true.
// +kubebuilder:default:=0
TTLSecondsAfterFinished int32 `json:"ttlSecondsAfterFinished,omitempty"`
// ShutdownAfterJobFinishes will determine whether to delete the ray cluster once rayJob succeed or failed.
ShutdownAfterJobFinishes bool `json:"shutdownAfterJobFinishes,omitempty"`
// suspend specifies whether the RayJob controller should create a RayCluster instance
// If a job is applied with the suspend field set to true,
// the RayCluster will not be created and will wait for the transition to false.
// If the RayCluster is already created, it will be deleted.
// In case of transition to false a new RayCluster will be created.
Suspend bool `json:"suspend,omitempty"`
// SubmitterPodTemplate is the template for the pod that will run `ray job submit`.
SubmitterPodTemplate *corev1.PodTemplateSpec `json:"submitterPodTemplate,omitempty"`
// EntrypointNumCpus specifies the number of cpus to reserve for the entrypoint command.
EntrypointNumCpus float32 `json:"entrypointNumCpus,omitempty"`
// EntrypointNumGpus specifies the number of gpus to reserve for the entrypoint command.
EntrypointNumGpus float32 `json:"entrypointNumGpus,omitempty"`
// EntrypointResources specifies the custom resources and quantities to reserve for the
// entrypoint command.
EntrypointResources string `json:"entrypointResources,omitempty"`
// Configurations of submitter k8s job.
SubmitterConfig *SubmitterConfig `json:"submitterConfig,omitempty"`
}

// RayJobStatus defines the observed state of RayJob
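
RayJobSpec gets the same treatment. To reproduce these suggestions outside golangci-lint, the fieldalignment analyzer from golang.org/x/tools can be wrapped in a small standalone driver. This is a sketch, not tooling that ships with KubeRay, and it assumes fieldalignment is indeed the govet check at work in this commit.

```go
// Hedged sketch: a standalone driver for the fieldalignment analyzer.
// Run the resulting binary against ./... to get reordering suggestions of the
// kind applied in this commit.
package main

import (
	"golang.org/x/tools/go/analysis/passes/fieldalignment"
	"golang.org/x/tools/go/analysis/singlechecker"
)

func main() {
	singlechecker.Main(fieldalignment.Analyzer)
}
```
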
26 changes: 13 additions & 13 deletions ray-operator/apis/ray/v1/rayservice_types.go
@@ -51,33 +51,33 @@ var DeploymentStatusEnum = struct {

// RayServiceSpec defines the desired state of RayService
type RayServiceSpec struct {
// Important: Run "make" to regenerate code after modifying this file
// Defines the applications and deployments to deploy, should be a YAML multi-line scalar string.
ServeConfigV2 string `json:"serveConfigV2,omitempty"`
RayClusterSpec RayClusterSpec `json:"rayClusterConfig,omitempty"`
// Deprecated: This field is not used anymore. ref: https://github.com/ray-project/kuberay/issues/1685
ServiceUnhealthySecondThreshold *int32 `json:"serviceUnhealthySecondThreshold,omitempty"`
// Deprecated: This field is not used anymore. ref: https://github.com/ray-project/kuberay/issues/1685
DeploymentUnhealthySecondThreshold *int32 `json:"deploymentUnhealthySecondThreshold,omitempty"`
// ServeService is the Kubernetes service for head node and worker nodes who have healthy http proxy to serve traffics.
ServeService *corev1.Service `json:"serveService,omitempty"`
// Important: Run "make" to regenerate code after modifying this file
// Defines the applications and deployments to deploy, should be a YAML multi-line scalar string.
ServeConfigV2 string `json:"serveConfigV2,omitempty"`
RayClusterSpec RayClusterSpec `json:"rayClusterConfig,omitempty"`
}

// RayServiceStatuses defines the observed state of RayService
type RayServiceStatuses struct {
// LastUpdateTime represents the timestamp when the RayService status was last updated.
LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
// ServiceStatus indicates the current RayService status.
ServiceStatus ServiceStatus `json:"serviceStatus,omitempty"`
ActiveServiceStatus RayServiceStatus `json:"activeServiceStatus,omitempty"`
// Pending Service Status indicates a RayCluster will be created or is being created.
PendingServiceStatus RayServiceStatus `json:"pendingServiceStatus,omitempty"`
// ServiceStatus indicates the current RayService status.
ServiceStatus ServiceStatus `json:"serviceStatus,omitempty"`
// NumServeEndpoints indicates the number of Ray Pods that are actively serving or have been selected by the serve service.
// Ray Pods without a proxy actor or those that are unhealthy will not be counted.
NumServeEndpoints int32 `json:"numServeEndpoints,omitempty"`
// observedGeneration is the most recent generation observed for this RayService. It corresponds to the
// RayService's generation, which is updated on mutation by the API Server.
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// LastUpdateTime represents the timestamp when the RayService status was last updated.
LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
}

type RayServiceStatus struct {
@@ -88,23 +88,23 @@ type RayServiceStatus struct {
}

type AppStatus struct {
Status string `json:"status,omitempty"`
Message string `json:"message,omitempty"`
// Keep track of how long the service is healthy.
// Update when Serve deployment is healthy or first time convert to unhealthy from healthy.
HealthLastUpdateTime *metav1.Time `json:"healthLastUpdateTime,omitempty"`
Deployments map[string]ServeDeploymentStatus `json:"serveDeploymentStatuses,omitempty"`
Status string `json:"status,omitempty"`
Message string `json:"message,omitempty"`
}

// ServeDeploymentStatus defines the current state of a Serve deployment
type ServeDeploymentStatus struct {
// Keep track of how long the service is healthy.
// Update when Serve deployment is healthy or first time convert to unhealthy from healthy.
HealthLastUpdateTime *metav1.Time `json:"healthLastUpdateTime,omitempty"`
// Name, Status, Message are from Ray Dashboard and represent a Serve deployment's state.
// TODO: change status type to enum
Status string `json:"status,omitempty"`
Message string `json:"message,omitempty"`
// Keep track of how long the service is healthy.
// Update when Serve deployment is healthy or first time convert to unhealthy from healthy.
HealthLastUpdateTime *metav1.Time `json:"healthLastUpdateTime,omitempty"`
}

// +kubebuilder:object:root=true
Expand Down