
Feature: don't reload prometheus if the scaled comp is itself #989

Merged
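The change: when a node being scaled in is a Prometheus instance itself, the reload task for that instance is skipped instead of being issued against a component that is about to be removed. A minimal, self-contained sketch of that filtering idea (the names below are illustrative stand-ins, not the real tiup types; the actual implementation is buildReloadPromTasks in pkg/cluster/manager/builder.go further down):

```go
package main

import "fmt"

// reloadTargets mimics the idea behind buildReloadPromTasks: it returns the
// Prometheus instance IDs that should receive a config reload, skipping any
// instance that is itself among the nodes being scaled in. Simplified,
// hypothetical sketch; the real function walks spec.Topology and builds
// SystemCtl reload tasks instead of returning strings.
func reloadTargets(promInstances []string, deletedNodes ...string) []string {
	deleted := make(map[string]struct{}, len(deletedNodes))
	for _, n := range deletedNodes {
		deleted[n] = struct{}{}
	}
	var targets []string
	for _, id := range promInstances {
		if _, ok := deleted[id]; ok {
			continue // the scaled-in component is this Prometheus itself: no reload
		}
		targets = append(targets, id)
	}
	return targets
}

func main() {
	// Scaling in n2:9090, which is itself a Prometheus instance:
	fmt.Println(reloadTargets([]string{"n1:9090", "n2:9090"}, "n2:9090")) // [n1:9090]
}
```
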
2 changes: 1 addition & 1 deletion components/cluster/command/patch.go
@@ -46,7 +46,7 @@ func newPatchCmd() *cobra.Command {

cmd.Flags().BoolVar(&overwrite, "overwrite", false, "Use this package in the future scale-out operations")
cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Specify the nodes")
cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Specify the role")
cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Specify the roles")
cmd.Flags().Uint64Var(&gOpt.APITimeout, "transfer-timeout", 300, "Timeout in seconds when transferring PD and TiKV store leaders")
return cmd
}
2 changes: 1 addition & 1 deletion components/dm/command/patch.go
@@ -46,6 +46,6 @@ func newPatchCmd() *cobra.Command {

cmd.Flags().BoolVar(&overwrite, "overwrite", false, "Use this package in the future scale-out operations")
cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Specify the nodes")
cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Specify the role")
cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Specify the roles")
return cmd
}
19 changes: 13 additions & 6 deletions pkg/cluster/manager/builder.go
@@ -30,8 +30,8 @@ import (
"github.com/pingcap/tiup/pkg/set"
)

// Dynamic reload Prometheus configuration
func buildDynReloadPromTasks(topo spec.Topology) []task.Task {
// buildReloadPromTasks reloads Prometheus configuration
func buildReloadPromTasks(topo spec.Topology, nodes ...string) []task.Task {
monitor := spec.FindComponent(topo, spec.ComponentPrometheus)
if monitor == nil {
return nil
@@ -40,11 +40,18 @@ func buildDynReloadPromTasks(topo spec.Topology) []task.Task {
if len(instances) == 0 {
return nil
}
var dynReloadTasks []task.Task
var tasks []task.Task
deletedNodes := set.NewStringSet(nodes...)
for _, inst := range monitor.Instances() {
dynReloadTasks = append(dynReloadTasks, task.NewBuilder().SystemCtl(inst.GetHost(), inst.ServiceName(), "reload", true).Build())
if deletedNodes.Exist(inst.ID()) {
continue
}
t := task.NewBuilder().
SystemCtl(inst.GetHost(), inst.ServiceName(), "reload", true).
Build()
tasks = append(tasks, t)
}
return dynReloadTasks
return tasks
}
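A brief usage note, grounded in the hunks below: the scale-out and destroy paths call buildReloadPromTasks(metadata.GetTopology()) with no node IDs, so every Prometheus instance still reloads as before, while the scale-in path calls buildReloadPromTasks(metadata.GetTopology(), nodes...), so a Prometheus instance that is itself among the scaled-in nodes is skipped rather than reloaded while being torn down.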

func buildScaleOutTask(
@@ -298,7 +305,7 @@ func buildScaleOutTask(
return operator.Start(ctx, newPart, operator.Options{OptTimeout: gOpt.OptTimeout}, tlsCfg)
}).
Parallel(false, refreshConfigTasks...).
Parallel(false, buildDynReloadPromTasks(metadata.GetTopology())...)
Parallel(false, buildReloadPromTasks(metadata.GetTopology())...)

if final != nil {
final(builder, name, metadata)
5 changes: 4 additions & 1 deletion pkg/cluster/manager/destroy.go
@@ -133,7 +133,10 @@ func (m *Manager) DestroyTombstone(
UpdateTopology(name, m.specManager.Path(name), clusterMeta, nodes)

regenConfigTasks, _ := buildRegenConfigTasks(m, name, topo, base, nodes)
t := b.ParallelStep("+ Refresh instance configs", true, regenConfigTasks...).Parallel(true, buildDynReloadPromTasks(metadata.GetTopology())...).Build()
t := b.
ParallelStep("+ Refresh instance configs", true, regenConfigTasks...).
Parallel(true, buildReloadPromTasks(metadata.GetTopology())...).
Build()
if err := t.Execute(task.NewContext()); err != nil {
if errorx.Cast(err) != nil {
// FIXME: Map possible task errors and give suggestions.
2 changes: 1 addition & 1 deletion pkg/cluster/manager/scale_in.go
@@ -92,7 +92,7 @@ func (m *Manager) ScaleIn(

t := b.
ParallelStep("+ Refresh instance configs", force, regenConfigTasks...).
Parallel(force, buildDynReloadPromTasks(metadata.GetTopology())...).
Parallel(force, buildReloadPromTasks(metadata.GetTopology(), nodes...)...).
Build()

if err := t.Execute(task.NewContext()); err != nil {
4 changes: 2 additions & 2 deletions pkg/cluster/task/builder.go
@@ -112,7 +112,7 @@ func (b *Builder) UpdateMeta(cluster string, metadata *spec.ClusterMeta, deleted
b.tasks = append(b.tasks, &UpdateMeta{
cluster: cluster,
metadata: metadata,
deletedNodesID: deletedNodeIds,
deletedNodeIDs: deletedNodeIds,
})
return b
}
@@ -123,7 +123,7 @@ func (b *Builder) UpdateTopology(cluster, profile string, metadata *spec.Cluster
metadata: metadata,
cluster: cluster,
profileDir: profile,
deletedNodesID: deletedNodeIds,
deletedNodeIDs: deletedNodeIds,
})
return b
}
6 changes: 3 additions & 3 deletions pkg/cluster/task/update_meta.go
@@ -25,15 +25,15 @@ import (
type UpdateMeta struct {
cluster string
metadata *spec.ClusterMeta
deletedNodesID []string
deletedNodeIDs []string
}

// Execute implements the Task interface
// the metadata, especially the topology, is in wide use;
// other callers point to this field by a pointer,
// so we should update the original topology directly rather than make a copy
func (u *UpdateMeta) Execute(ctx *Context) error {
deleted := set.NewStringSet(u.deletedNodesID...)
deleted := set.NewStringSet(u.deletedNodeIDs...)
topo := u.metadata.Topology

tidbServers := make([]spec.TiDBSpec, 0)
@@ -154,5 +154,5 @@ func (u *UpdateMeta) Rollback(ctx *Context) error {

// String implements the fmt.Stringer interface
func (u *UpdateMeta) String() string {
return fmt.Sprintf("UpdateMeta: cluster=%s, deleted=`'%s'`", u.cluster, strings.Join(u.deletedNodesID, "','"))
return fmt.Sprintf("UpdateMeta: cluster=%s, deleted=`'%s'`", u.cluster, strings.Join(u.deletedNodeIDs, "','"))
}
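The doc comment on Execute explains why the topology is mutated in place rather than rebuilt on a copy: other callers hold a pointer to the same metadata. A small, self-contained sketch of that aliasing behaviour, using hypothetical stand-in types rather than the real spec.ClusterMeta:

```go
package main

import "fmt"

// Simplified stand-ins for spec.ClusterMeta and its topology; hypothetical
// types used only to illustrate the comment above, not the real tiup structs.
type topology struct{ TiDBServers []string }
type clusterMeta struct{ Topology *topology }

func main() {
	meta := &clusterMeta{Topology: &topology{TiDBServers: []string{"n1:4000", "n2:4000"}}}

	// Another caller holds the same pointer, as described in the Execute comment.
	observer := meta

	// Mutating the pointed-to topology in place (what UpdateMeta.Execute does)
	// is visible to every holder of the pointer.
	meta.Topology.TiDBServers = []string{"n1:4000"}
	fmt.Println(observer.Topology.TiDBServers) // [n1:4000]

	// Replacing the topology on a copied value would leave other holders unaware.
	detached := *meta
	detached.Topology = &topology{TiDBServers: nil}
	fmt.Println(observer.Topology.TiDBServers) // still [n1:4000]
}
```
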
4 changes: 2 additions & 2 deletions pkg/cluster/task/update_topology.go
@@ -17,7 +17,7 @@ type UpdateTopology struct {
cluster string
profileDir string
metadata *spec.ClusterMeta
deletedNodesID []string
deletedNodeIDs []string
}

// String implements the fmt.Stringer interface
@@ -41,7 +41,7 @@ func (u *UpdateTopology) Execute(ctx *Context) error {

topo := u.metadata.Topology

deleted := set.NewStringSet(u.deletedNodesID...)
deleted := set.NewStringSet(u.deletedNodeIDs...)

var ops []clientv3.Op
var instances []spec.Instance
12 changes: 12 additions & 0 deletions tests/tiup-cluster/script/scale_tools.sh
@@ -36,8 +36,12 @@ function scale_tools() {

if [ $test_tls = true ]; then
total_sub_one=18
total=19
total_add_one=20
else
total_sub_one=21
total=22
total_add_one=23
fi

echo "start scale in pump"
@@ -70,6 +74,14 @@
topo=./topo/full_scale_in_grafana.yaml
tiup-cluster $client --yes scale-out $name $topo

echo "start scale out prometheus"
topo=./topo/full_scale_in_prometheus.yaml
tiup-cluster $client --yes scale-out $name $topo
wait_instance_num_reach $name $total_add_one $native_ssh
echo "start scale in prometheus"
tiup-cluster $client --yes scale-in $name -N n2:9090
wait_instance_num_reach $name $total $native_ssh

# make sure grafana dashboards have been set to default (since full_scale_in_grafana.yaml didn't provide a local dashboards dir)
! tiup-cluster $client exec $name -N n1 --command "grep magic-string-for-test /home/tidb/deploy/grafana-3000/dashboards/tidb.json"

1 change: 0 additions & 1 deletion tests/tiup-cluster/topo/.gitignore

This file was deleted.

3 changes: 3 additions & 0 deletions tests/tiup-cluster/topo/full_scale_in_prometheus.yaml
@@ -0,0 +1,3 @@
monitoring_servers:
- host: n2
rule_dir: /tmp/local/prometheus
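
For reference, the scale_tools.sh steps above exercise this file end to end: scale-out with ./topo/full_scale_in_prometheus.yaml adds a Prometheus instance on host n2, and scale-in with -N n2:9090 removes it again, which is exactly the case the new logic targets — the n2:9090 instance is not asked to reload its own configuration while it is being removed.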