From 8d536378a356e7743ac5edde757c5c2aa188ccda Mon Sep 17 00:00:00 2001 From: Seth Hoenig Date: Wed, 9 Aug 2023 19:48:29 +0000 Subject: [PATCH] go: eliminate helpers in favor of min/max --- .../allocrunner/taskrunner/driver_handle.go | 3 +- .../allocrunner/taskrunner/getter/params.go | 15 +++--- client/client.go | 2 +- command/job_restart.go | 3 +- go.mod | 2 +- helper/cluster_test.go | 2 +- helper/funcs.go | 17 ------- helper/funcs_test.go | 46 ------------------- nomad/blocked_evals.go | 6 +-- nomad/drainer/watch_jobs.go | 2 +- nomad/job_endpoint.go | 2 +- nomad/node_pool_endpoint.go | 10 ++-- nomad/scaling_endpoint.go | 6 +-- nomad/server.go | 4 +- nomad/state/state_store_variables.go | 6 +-- nomad/structs/structs.go | 2 +- scheduler/propertyset.go | 4 +- scheduler/reconcile.go | 16 +++---- 18 files changed, 37 insertions(+), 111 deletions(-) diff --git a/client/allocrunner/taskrunner/driver_handle.go b/client/allocrunner/taskrunner/driver_handle.go index 5b8872325207..aefd04bfb7f3 100644 --- a/client/allocrunner/taskrunner/driver_handle.go +++ b/client/allocrunner/taskrunner/driver_handle.go @@ -9,7 +9,6 @@ import ( "time" cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers" ) @@ -26,7 +25,7 @@ func NewDriverHandle( net: net, taskID: taskID, killSignal: task.KillSignal, - killTimeout: helper.Min(task.KillTimeout, maxKillTimeout), + killTimeout: min(task.KillTimeout, maxKillTimeout), } } diff --git a/client/allocrunner/taskrunner/getter/params.go b/client/allocrunner/taskrunner/getter/params.go index 98e225c8d6a5..3e8b6aed7204 100644 --- a/client/allocrunner/taskrunner/getter/params.go +++ b/client/allocrunner/taskrunner/getter/params.go @@ -13,7 +13,6 @@ import ( "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-getter" - "github.com/hashicorp/nomad/helper" "golang.org/x/exp/maps" "golang.org/x/exp/slices" ) @@ -65,13 +64,13 @@ func (p *parameters) read(r io.Reader) error { // terminated via signal. func (p *parameters) deadline() time.Duration { const minimum = 30 * time.Minute - max := minimum - max = helper.Max(max, p.HTTPReadTimeout) - max = helper.Max(max, p.GCSTimeout) - max = helper.Max(max, p.GitTimeout) - max = helper.Max(max, p.HgTimeout) - max = helper.Max(max, p.S3Timeout) - return max + 1*time.Minute + maximum := minimum + maximum = max(maximum, p.HTTPReadTimeout) + maximum = max(maximum, p.GCSTimeout) + maximum = max(maximum, p.GitTimeout) + maximum = max(maximum, p.HgTimeout) + maximum = max(maximum, p.S3Timeout) + return maximum + 1*time.Minute } // Equal returns whether p and o are the same. diff --git a/client/client.go b/client/client.go index b6e38544fdcf..359fa2ab65ed 100644 --- a/client/client.go +++ b/client/client.go @@ -3005,7 +3005,7 @@ func (c *Client) consulDiscoveryImpl() error { // datacenterQueryLimit, the next heartbeat will pick // a new set of servers so it's okay. 
shuffleStrings(dcs[1:]) - dcs = dcs[0:helper.Min(len(dcs), datacenterQueryLimit)] + dcs = dcs[0:min(len(dcs), datacenterQueryLimit)] } serviceName := c.GetConfig().ConsulConfig.ServerServiceName diff --git a/command/job_restart.go b/command/job_restart.go index 6f83bce3cc9b..d4c7f28fa238 100644 --- a/command/job_restart.go +++ b/command/job_restart.go @@ -21,7 +21,6 @@ import ( "github.com/hashicorp/go-set" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/api/contexts" - "github.com/hashicorp/nomad/helper" "github.com/posener/complete" ) @@ -370,7 +369,7 @@ func (c *JobRestartCommand) Run(args []string) int { "[bold]==> %s: Restarting %s batch of %d allocations[reset]", formatTime(time.Now()), humanize.Ordinal(batchNumber), - helper.Min(c.batchSize, remaining), + min(c.batchSize, remaining), ))) } diff --git a/go.mod b/go.mod index f70f288f04a6..8f90ed81c288 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/nomad -go 1.20 +go 1.21 // Pinned dependencies are noted in github.com/hashicorp/nomad/issues/11826 replace ( diff --git a/helper/cluster_test.go b/helper/cluster_test.go index 04ae9d3be53c..b3bbfbdb5527 100644 --- a/helper/cluster_test.go +++ b/helper/cluster_test.go @@ -21,7 +21,7 @@ func TestCluster_RandomStagger(t *testing.T) { } abs := func(d time.Duration) time.Duration { - return Max(d, -d) + return max(d, -d) } for _, tc := range cases { diff --git a/helper/funcs.go b/helper/funcs.go index e405ae5346d5..5094cbe84e53 100644 --- a/helper/funcs.go +++ b/helper/funcs.go @@ -18,7 +18,6 @@ import ( multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-set" "github.com/hashicorp/hcl/hcl/ast" - "golang.org/x/exp/constraints" "golang.org/x/exp/maps" "golang.org/x/exp/slices" ) @@ -82,22 +81,6 @@ func HashUUID(input string) (output string, hashed bool) { return output, true } -// Min returns the minimum of a and b. -func Min[T constraints.Ordered](a, b T) T { - if a < b { - return a - } - return b -} - -// Max returns the maximum of a and b. -func Max[T constraints.Ordered](a, b T) T { - if a > b { - return a - } - return b -} - // UniqueMapSliceValues returns the union of values from each slice in a map[K][]V. 
func UniqueMapSliceValues[K, V comparable](m map[K][]V) []V { s := set.New[V](0) diff --git a/helper/funcs_test.go b/helper/funcs_test.go index ed7245bd4eae..80c259d2f491 100644 --- a/helper/funcs_test.go +++ b/helper/funcs_test.go @@ -15,52 +15,6 @@ import ( "golang.org/x/exp/maps" ) -func Test_Min(t *testing.T) { - t.Run("int", func(t *testing.T) { - a := 1 - b := 2 - must.Eq(t, 1, Min(a, b)) - must.Eq(t, 1, Min(b, a)) - }) - - t.Run("float64", func(t *testing.T) { - a := 1.1 - b := 2.2 - must.Eq(t, 1.1, Min(a, b)) - must.Eq(t, 1.1, Min(b, a)) - }) - - t.Run("string", func(t *testing.T) { - a := "cat" - b := "dog" - must.Eq(t, "cat", Min(a, b)) - must.Eq(t, "cat", Min(b, a)) - }) -} - -func Test_Max(t *testing.T) { - t.Run("int", func(t *testing.T) { - a := 1 - b := 2 - must.Eq(t, 2, Max(a, b)) - must.Eq(t, 2, Max(b, a)) - }) - - t.Run("float64", func(t *testing.T) { - a := 1.1 - b := 2.2 - must.Eq(t, 2.2, Max(a, b)) - must.Eq(t, 2.2, Max(b, a)) - }) - - t.Run("string", func(t *testing.T) { - a := "cat" - b := "dog" - must.Eq(t, "dog", Max(a, b)) - must.Eq(t, "dog", Max(b, a)) - }) -} - func TestIsSubset(t *testing.T) { l := []string{"a", "b", "c"} s := []string{"d"} diff --git a/nomad/blocked_evals.go b/nomad/blocked_evals.go index 8ecccab80e76..1ec1beee4a38 100644 --- a/nomad/blocked_evals.go +++ b/nomad/blocked_evals.go @@ -293,7 +293,7 @@ func latestEvalIndex(eval *structs.Evaluation) uint64 { return 0 } - return helper.Max(eval.CreateIndex, eval.SnapshotIndex) + return max(eval.CreateIndex, eval.SnapshotIndex) } // missedUnblock returns whether an evaluation missed an unblock while it was in @@ -545,9 +545,9 @@ func (b *BlockedEvals) unblock(computedClass, quota string, index uint64) { // Every eval that has escaped computed node class has to be unblocked // because any node could potentially be feasible. 
- numEscaped := len(b.escaped) numQuotaLimit := 0 - unblocked := make(map[*structs.Evaluation]string, helper.Max(numEscaped, 4)) + numEscaped := len(b.escaped) + unblocked := make(map[*structs.Evaluation]string, max(uint64(numEscaped), 4)) if numEscaped != 0 && computedClass != "" { for id, wrapped := range b.escaped { diff --git a/nomad/drainer/watch_jobs.go b/nomad/drainer/watch_jobs.go index e2c456bef664..523a198dd491 100644 --- a/nomad/drainer/watch_jobs.go +++ b/nomad/drainer/watch_jobs.go @@ -414,7 +414,7 @@ func handleTaskGroup(snap *state.StateSnapshot, batch bool, tg *structs.TaskGrou // Determine how many we can drain thresholdCount := tg.Count - tg.Migrate.MaxParallel numToDrain := healthy - thresholdCount - numToDrain = helper.Min(len(drainable), numToDrain) + numToDrain = min(len(drainable), numToDrain) if numToDrain <= 0 { return nil } diff --git a/nomad/job_endpoint.go b/nomad/job_endpoint.go index c17cc28585e0..274000cabcaa 100644 --- a/nomad/job_endpoint.go +++ b/nomad/job_endpoint.go @@ -1502,7 +1502,7 @@ func (j *Job) List(args *structs.JobListRequest, reply *structs.JobListResponse) if err != nil { return err } - reply.Index = helper.Max(jindex, sindex) + reply.Index = max(jindex, sindex) // Set the query response j.srv.setQueryMeta(&reply.QueryMeta) diff --git a/nomad/node_pool_endpoint.go b/nomad/node_pool_endpoint.go index c0de8b1e57d5..ac71ba721023 100644 --- a/nomad/node_pool_endpoint.go +++ b/nomad/node_pool_endpoint.go @@ -12,9 +12,7 @@ import ( metrics "github.com/armon/go-metrics" "github.com/hashicorp/go-memdb" multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/nomad/acl" - "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/state/paginator" "github.com/hashicorp/nomad/nomad/structs" @@ -106,7 +104,7 @@ func (n *NodePool) List(args *structs.NodePoolListRequest, reply *structs.NodePo if err != nil { return err } - reply.Index = helper.Max(1, index) + reply.Index = max(1, index) // Set the query response. n.srv.setQueryMeta(&reply.QueryMeta) @@ -161,7 +159,7 @@ func (n *NodePool) GetNodePool(args *structs.NodePoolSpecificRequest, reply *str if err != nil { return err } - reply.Index = helper.Max(1, index) + reply.Index = max(1, index) } return nil }} @@ -503,7 +501,7 @@ func (n *NodePool) ListJobs(args *structs.NodePoolJobsRequest, reply *structs.No if err != nil { return err } - reply.Index = helper.Max(jindex, sindex) + reply.Index = max(jindex, sindex) // Set the query response n.srv.setQueryMeta(&reply.QueryMeta) @@ -593,7 +591,7 @@ func (n *NodePool) ListNodes(args *structs.NodePoolNodesRequest, reply *structs. if err != nil { return err } - reply.Index = helper.Max(1, index) + reply.Index = max(1, index) // Set the query response. 
n.srv.setQueryMeta(&reply.QueryMeta) diff --git a/nomad/scaling_endpoint.go b/nomad/scaling_endpoint.go index 3a7b0ecbf62d..1a9f2544df77 100644 --- a/nomad/scaling_endpoint.go +++ b/nomad/scaling_endpoint.go @@ -10,9 +10,7 @@ import ( "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - "github.com/hashicorp/nomad/acl" - "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" ) @@ -148,7 +146,7 @@ func (p *Scaling) GetPolicy(args *structs.ScalingPolicySpecificRequest, if err != nil { return err } - reply.Index = helper.Max(1, index) + reply.Index = max(1, index) } return nil }} @@ -212,7 +210,7 @@ func (p *Scaling) listAllNamespaces(args *structs.ScalingPolicyListRequest, repl if err != nil { return err } - reply.Index = helper.Max(1, index) + reply.Index = max(1, index) // Set the query response p.srv.setQueryMeta(&reply.QueryMeta) diff --git a/nomad/server.go b/nomad/server.go index 12f25a646e8f..82f96999bf6e 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -977,7 +977,7 @@ func (s *Server) setupBootstrapHandler() error { // walk all datacenter until it finds enough hosts to // form a quorum. shuffleStrings(dcs[1:]) - dcs = dcs[0:helper.Min(len(dcs), datacenterQueryLimit)] + dcs = dcs[0:min(len(dcs), datacenterQueryLimit)] } nomadServerServiceName := s.config.ConsulConfig.ServerServiceName @@ -2010,7 +2010,7 @@ func (s *Server) setReplyQueryMeta(stateStore *state.StateStore, table string, r if err != nil { return err } - reply.Index = helper.Max(1, index) + reply.Index = max(1, index) // Set the query response. s.setQueryMeta(reply) diff --git a/nomad/state/state_store_variables.go b/nomad/state/state_store_variables.go index d39d6fa6c638..83311d8912dc 100644 --- a/nomad/state/state_store_variables.go +++ b/nomad/state/state_store_variables.go @@ -8,8 +8,6 @@ import ( "math" "github.com/hashicorp/go-memdb" - - "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" ) @@ -251,7 +249,7 @@ func (s *StateStore) varSetTxn(tx WriteTxn, idx uint64, req *structs.VarApplySta if quotaChange > 0 { quotaUsed.Size += quotaChange } else if quotaChange < 0 { - quotaUsed.Size -= helper.Min(quotaUsed.Size, -quotaChange) + quotaUsed.Size -= min(quotaUsed.Size, -quotaChange) } err = s.enforceVariablesQuota(idx, tx, sv.Namespace, quotaChange) @@ -392,7 +390,7 @@ func (s *StateStore) svDeleteTxn(tx WriteTxn, idx uint64, req *structs.VarApplyS if existingQuota != nil { quotaUsed := existingQuota.(*structs.VariablesQuota) quotaUsed = quotaUsed.Copy() - quotaUsed.Size -= helper.Min(quotaUsed.Size, int64(len(sv.Data))) + quotaUsed.Size -= min(quotaUsed.Size, int64(len(sv.Data))) quotaUsed.ModifyIndex = idx if err := tx.Insert(TableVariablesQuotas, quotaUsed); err != nil { return req.ErrorResponse(idx, fmt.Errorf("variable quota insert failed: %v", err)) diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index 68943f871dad..949d35c6b4ac 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -9105,7 +9105,7 @@ func (e *TaskEvent) SetValidationError(err error) *TaskEvent { } func (e *TaskEvent) SetKillTimeout(timeout, maxTimeout time.Duration) *TaskEvent { - actual := helper.Min(timeout, maxTimeout) + actual := min(timeout, maxTimeout) e.KillTimeout = actual e.Details["kill_timeout"] = actual.String() return e diff --git a/scheduler/propertyset.go b/scheduler/propertyset.go index 48d46ea82c30..ea53678d3158 100644 --- a/scheduler/propertyset.go +++ 
b/scheduler/propertyset.go @@ -10,8 +10,6 @@ import ( log "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/go-set" - - "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" ) @@ -263,7 +261,7 @@ func (p *propertySet) UsedCount(option *structs.Node, _ string) (string, string, // existing and proposed allocations. It also takes into account any stopped // allocations func (p *propertySet) GetCombinedUseMap() map[string]uint64 { - combinedUse := make(map[string]uint64, helper.Max(len(p.existingValues), len(p.proposedValues))) + combinedUse := make(map[string]uint64, max(len(p.existingValues), len(p.proposedValues))) for _, usedValues := range []map[string]uint64{p.existingValues, p.proposedValues} { for propertyValue, usedCount := range usedValues { targetPropertyValue := p.targetedPropertyValue(propertyValue) diff --git a/scheduler/reconcile.go b/scheduler/reconcile.go index a170e6c681c2..db0ecda4b5f2 100644 --- a/scheduler/reconcile.go +++ b/scheduler/reconcile.go @@ -816,8 +816,8 @@ func (a *allocReconciler) computeReplacements(deploymentPlaceReady bool, desired a.markStop(failed, "", allocRescheduled) desiredChanges.Stop += uint64(len(failed)) - min := helper.Min(len(place), underProvisionedBy) - underProvisionedBy -= min + minimum := min(len(place), underProvisionedBy) + underProvisionedBy -= minimum return underProvisionedBy } @@ -828,7 +828,7 @@ func (a *allocReconciler) computeReplacements(deploymentPlaceReady bool, desired // If allocs have been lost, determine the number of replacements that are needed // and add placements to the result for the lost allocs. if len(lost) != 0 { - allowed := helper.Min(len(lost), len(place)) + allowed := min(len(lost), len(place)) desiredChanges.Place += uint64(allowed) a.result.place = append(a.result.place, place[:allowed]...) } @@ -869,10 +869,10 @@ func (a *allocReconciler) computeDestructiveUpdates(destructive allocSet, underP desiredChanges *structs.DesiredUpdates, tg *structs.TaskGroup) { // Do all destructive updates - min := helper.Min(len(destructive), underProvisionedBy) - desiredChanges.DestructiveUpdate += uint64(min) - desiredChanges.Ignore += uint64(len(destructive) - min) - for _, alloc := range destructive.nameOrder()[:min] { + minimum := min(len(destructive), underProvisionedBy) + desiredChanges.DestructiveUpdate += uint64(minimum) + desiredChanges.Ignore += uint64(len(destructive) - minimum) + for _, alloc := range destructive.nameOrder()[:minimum] { a.result.destructiveUpdate = append(a.result.destructiveUpdate, allocDestructiveResult{ placeName: alloc.Name, placeTaskGroup: tg, @@ -948,7 +948,7 @@ func (a *allocReconciler) isDeploymentComplete(groupName string, destructive, in // Final check to see if the deployment is complete is to ensure everything is healthy if dstate, ok := a.deployment.TaskGroups[groupName]; ok { - if dstate.HealthyAllocs < helper.Max(dstate.DesiredTotal, dstate.DesiredCanaries) || // Make sure we have enough healthy allocs + if dstate.HealthyAllocs < max(dstate.DesiredTotal, dstate.DesiredCanaries) || // Make sure we have enough healthy allocs (dstate.DesiredCanaries > 0 && !dstate.Promoted) { // Make sure we are promoted if we have canaries complete = false }
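
Reviewer note (illustrative only, not part of the patch): a minimal sketch of the Go 1.21 builtins that replace helper.Min and helper.Max above. The values below are arbitrary examples; the point is that the builtins accept any ordered operands and are variadic, so chained helper.Max calls such as the ones in parameters.deadline() could also collapse into a single call.

package main

import (
	"fmt"
	"time"
)

func main() {
	// helper.Min(task.KillTimeout, maxKillTimeout) becomes the builtin min.
	killTimeout := min(25*time.Second, 30*time.Second) // 25s

	// Chained helper.Max calls can become one variadic max call.
	deadline := max(30*time.Minute, 10*time.Minute, 45*time.Minute) // 45m

	// The builtins cover the same ordered types as the removed
	// constraints.Ordered helpers: integers, floats, strings, durations.
	fmt.Println(killTimeout, deadline, min("cat", "dog"), max(1.1, 2.2))
}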