
Commit

Fix/pd scale in topo not updated (#824)
jsvisa authored and lucklove committed Oct 23, 2020
1 parent 9641536 commit 1fb0740
Showing 4 changed files with 24 additions and 18 deletions.
23 changes: 6 additions & 17 deletions components/cluster/command/scale_in.go
@@ -37,25 +37,14 @@ func newScaleInCmd() *cobra.Command {
     scale := func(b *task.Builder, imetadata spec.Metadata, tlsCfg *tls.Config) {
         metadata := imetadata.(*spec.ClusterMeta)
 
+        nodes := gOpt.Nodes
         if !gOpt.Force {
-            b.ClusterOperate(metadata.Topology, operator.ScaleInOperation, gOpt, tlsCfg).
-                UpdateMeta(clusterName, metadata, operator.AsyncNodes(metadata.Topology, gOpt.Nodes, false)).
-                UpdateTopology(
-                    clusterName,
-                    tidbSpec.Path(clusterName),
-                    metadata,
-                    operator.AsyncNodes(metadata.Topology, gOpt.Nodes, false), /* deleteNodeIds */
-                )
-        } else {
-            b.ClusterOperate(metadata.Topology, operator.ScaleInOperation, gOpt, tlsCfg).
-                UpdateMeta(clusterName, metadata, gOpt.Nodes).
-                UpdateTopology(
-                    clusterName,
-                    tidbSpec.Path(clusterName),
-                    metadata,
-                    gOpt.Nodes,
-                )
+            nodes = operator.AsyncNodes(metadata.Topology, nodes, false)
         }
+
+        b.ClusterOperate(metadata.Topology, operator.ScaleInOperation, gOpt, tlsCfg).
+            UpdateMeta(clusterName, metadata, nodes).
+            UpdateTopology(clusterName, tidbSpec.Path(clusterName), metadata, nodes)
     }
 
     return manager.ScaleIn(
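For readers less familiar with tiup's task builder, the following is a minimal, self-contained sketch of the shape of this refactor. It uses stand-in types rather than the real task.Builder, operator, and spec packages, and the syncOnly helper is hypothetical: it only illustrates the assumed role of operator.AsyncNodes(metadata.Topology, nodes, false), namely narrowing the requested nodes to those that may be dropped from the metadata immediately when --force is not given.

package main

import (
    "fmt"
    "strings"
)

// builder is a stand-in for tiup's task.Builder chain; it only records which
// node IDs would be written back to the metadata and to the topology file.
type builder struct {
    metaNodes []string
    topoNodes []string
}

func (b *builder) updateMeta(nodes []string) *builder     { b.metaNodes = nodes; return b }
func (b *builder) updateTopology(nodes []string) *builder { b.topoNodes = nodes; return b }

// syncOnly is a hypothetical placeholder for operator.AsyncNodes(..., false).
// Here it simply pretends that ":20160" (TiKV) instances are removed
// asynchronously and therefore must not be deleted from the metadata yet.
func syncOnly(nodes []string) []string {
    kept := make([]string, 0, len(nodes))
    for _, n := range nodes {
        if !strings.HasSuffix(n, ":20160") {
            kept = append(kept, n)
        }
    }
    return kept
}

// scaleIn mirrors the refactored closure: compute the delete list once, then
// feed the same list to both update steps through a single builder chain.
func scaleIn(requested []string, force bool) *builder {
    nodes := requested
    if !force {
        nodes = syncOnly(nodes)
    }
    b := &builder{}
    return b.updateMeta(nodes).updateTopology(nodes)
}

func main() {
    b := scaleIn([]string{"172.19.0.103:2379", "172.19.0.104:20160"}, false)
    fmt.Println(b.metaNodes, b.topoNodes) // [172.19.0.103:2379] [172.19.0.103:2379]
}

Hoisting the shared chain out of the if/else means the UpdateMeta/UpdateTopology sequence is spelled out once, so later changes to that chain cannot diverge between the force and non-force paths.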
10 changes: 10 additions & 0 deletions pkg/cluster/operation/scale_in.go
@@ -243,6 +243,16 @@ func ScaleInCluster(
         }
     }
 
+    pdServers := make([]spec.PDSpec, 0, len(cluster.PDServers))
+    for i := 0; i < len(cluster.PDServers); i++ {
+        s := cluster.PDServers[i]
+        id := s.Host + ":" + strconv.Itoa(s.ClientPort)
+        if !deletedNodes.Exist(id) {
+            pdServers = append(pdServers, s)
+        }
+    }
+    cluster.PDServers = pdServers
+
     for i := 0; i < len(cluster.TiKVServers); i++ {
         s := cluster.TiKVServers[i]
         id := s.Host + ":" + strconv.Itoa(s.Port)
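The block added to ScaleInCluster rebuilds cluster.PDServers without the instances whose host:client_port ID is in deletedNodes, so a scaled-in PD no longer lingers in the in-memory topology that later feeds other components' startup scripts. Below is a small sketch of the same filtering pattern with simplified stand-in types (pdSpec and stringSet are not tiup's real spec.PDSpec and set implementation).

package main

import (
    "fmt"
    "strconv"
)

// pdSpec is a simplified stand-in for spec.PDSpec; only the fields needed to
// build the "host:client_port" node ID are kept.
type pdSpec struct {
    Host       string
    ClientPort int
}

// stringSet mimics the Exist lookup used on deletedNodes in the diff above.
type stringSet map[string]struct{}

func (s stringSet) Exist(id string) bool { _, ok := s[id]; return ok }

// filterPDServers keeps every PD whose ID is not in deleted, mirroring the
// loop added to ScaleInCluster.
func filterPDServers(pds []pdSpec, deleted stringSet) []pdSpec {
    kept := make([]pdSpec, 0, len(pds))
    for _, s := range pds {
        id := s.Host + ":" + strconv.Itoa(s.ClientPort)
        if !deleted.Exist(id) {
            kept = append(kept, s)
        }
    }
    return kept
}

func main() {
    pds := []pdSpec{
        {Host: "172.19.0.101", ClientPort: 2379},
        {Host: "172.19.0.103", ClientPort: 2379},
    }
    deleted := stringSet{"172.19.0.103:2379": {}}
    fmt.Println(filterPDServers(pds, deleted)) // [{172.19.0.101 2379}]
}

Pre-sizing the kept slice to the original length keeps the filter a single pass with at most one allocation.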
2 changes: 1 addition & 1 deletion tests/tiup-cluster/run.sh
@@ -21,7 +21,7 @@ cp -r ./local /tmp/local
 ls -lh /tmp/local
 
 function tiup-cluster() {
-    mkdir -p "~/.tiup/bin" && cp -f ./root.json ~/.tiup/bin/
+    mkdir -p ~/.tiup/bin && cp -f ./root.json ~/.tiup/bin/
     # echo "in function"
     if [ -f "./bin/tiup-cluster.test" ]; then
         ./bin/tiup-cluster.test -test.coverprofile=./cover/cov.itest-$(date +'%s')-$RANDOM.out __DEVEL--i-heard-you-like-tests "$@"
7 changes: 7 additions & 0 deletions tests/tiup-cluster/script/scale_core.sh
@@ -58,10 +58,17 @@ function scale_core() {
     echo "start scale in pd"
     tiup-cluster $client --yes scale-in $name -N $ipprefix.103:2379
     wait_instance_num_reach $name $total_sub_one $native_ssh
+
+    # validate https://github.com/pingcap/tiup/issues/786
+    # ensure that this instance is removed from the startup scripts of other components that depend on PD
+    ! tiup-cluster $client exec $name -N $ipprefix.101 --command "grep -q $ipprefix.103:2379 /home/tidb/deploy/tidb-4000/scripts/run_tidb.sh"
     echo "start scale out pd"
     topo=./topo/full_scale_in_pd.yaml
     sed "s/__IPPREFIX__/$ipprefix/g" $topo.tpl > $topo
     tiup-cluster $client --yes scale-out $name $topo
+
+    # after scale-out, ensure this instance comes back
+    tiup-cluster $client exec $name -N $ipprefix.101 --command "grep -q $ipprefix.103:2379 /home/tidb/deploy/tidb-4000/scripts/run_tidb.sh"
 
     tiup-cluster $client _test $name writable
 }
