diff --git a/tests/tiup-cluster/script/cmd_subtest.sh b/tests/tiup-cluster/script/cmd_subtest.sh
index 835191f83b..71e43a70dc 100755
--- a/tests/tiup-cluster/script/cmd_subtest.sh
+++ b/tests/tiup-cluster/script/cmd_subtest.sh
@@ -6,7 +6,6 @@ function cmd_subtest() {
     version=$1
     test_tls=$2
     native_ssh=$3
-    ipprefix=${TIUP_TEST_IP_PREFIX:-"$ipprefix"}

     name="test_cmd_$RANDOM"
     if [ $test_tls = true ]; then
@@ -14,7 +13,6 @@ function cmd_subtest() {
     else
         topo=./topo/full.yaml
     fi
-    sed "s/__IPPREFIX__/$ipprefix/g" $topo.tpl > $topo

     client=""
     if [ $native_ssh == true ]; then
@@ -41,9 +39,9 @@ function cmd_subtest() {
     tiup-cluster $client --yes deploy $name $version $topo -i ~/.ssh/id_rsa --skip-create-user

     # check the local config
-    tiup-cluster $client exec $name -N $ipprefix.101 --command "grep magic-string-for-test /home/tidb/deploy/prometheus-9090/conf/tidb.rules.yml"
-    tiup-cluster $client exec $name -N $ipprefix.101 --command "grep magic-string-for-test /home/tidb/deploy/grafana-3000/dashboards/tidb.json"
-    tiup-cluster $client exec $name -N $ipprefix.101 --command "grep magic-string-for-test /home/tidb/deploy/alertmanager-9093/conf/alertmanager.yml"
+    tiup-cluster $client exec $name -N n1 --command "grep magic-string-for-test /home/tidb/deploy/prometheus-9090/conf/tidb.rules.yml"
+    tiup-cluster $client exec $name -N n1 --command "grep magic-string-for-test /home/tidb/deploy/grafana-3000/dashboards/tidb.json"
+    tiup-cluster $client exec $name -N n1 --command "grep magic-string-for-test /home/tidb/deploy/alertmanager-9093/conf/alertmanager.yml"

     tiup-cluster $client list | grep "$name"

@@ -59,9 +57,9 @@ function cmd_subtest() {

     # check the data dir of tikv
     # it's ok to omit client type after deploy
-    tiup-cluster exec $name -N $ipprefix.101 --command "grep /home/tidb/deploy/tikv-20160/data /home/tidb/deploy/tikv-20160/scripts/run_tikv.sh"
-    tiup-cluster exec $name -N $ipprefix.101 --command "grep advertise-status-addr /home/tidb/deploy/tikv-20160/scripts/run_tikv.sh"
-    tiup-cluster exec $name -N $ipprefix.103 --command "grep /home/tidb/my_kv_data /home/tidb/deploy/tikv-20160/scripts/run_tikv.sh"
+    tiup-cluster exec $name -N n1 --command "grep /home/tidb/deploy/tikv-20160/data /home/tidb/deploy/tikv-20160/scripts/run_tikv.sh"
+    tiup-cluster exec $name -N n1 --command "grep advertise-status-addr /home/tidb/deploy/tikv-20160/scripts/run_tikv.sh"
+    tiup-cluster exec $name -N n3 --command "grep /home/tidb/my_kv_data /home/tidb/deploy/tikv-20160/scripts/run_tikv.sh"

     # test patch overwrite
     tiup-cluster $client --yes patch $name ~/.tiup/storage/cluster/packages/tidb-$version-linux-amd64.tar.gz -R tidb --overwrite
@@ -95,11 +93,11 @@ function cmd_subtest() {
     tiup-cluster $client exec $name -R tidb --command="systemctl status tidb-4000|grep 'enabled;'"
     tiup-cluster $client exec $name -R pd --command="systemctl status pd-2379|grep 'enabled;'"

-    tiup-cluster $client --yes clean $name --data --all --ignore-node $ipprefix.101:9090
+    tiup-cluster $client --yes clean $name --data --all --ignore-node n1:9090

     echo "checking cleanup data and log"
-    tiup-cluster $client exec $name -N $ipprefix.101 --command "ls /home/tidb/deploy/prometheus-9090/log/prometheus.log"
-    ! tiup-cluster $client exec $name -N $ipprefix.101 --command "ls /home/tidb/deploy/tikv-20160/log/tikv.log"
+    tiup-cluster $client exec $name -N n1 --command "ls /home/tidb/deploy/prometheus-9090/log/prometheus.log"
+    ! tiup-cluster $client exec $name -N n1 --command "ls /home/tidb/deploy/tikv-20160/log/tikv.log"

     tiup-cluster $client --yes start $name

@@ -109,6 +107,6 @@ function cmd_subtest() {
     tiup-cluster $client --yes destroy $name

     # after destroy the cluster, the public key should be deleted
-    ! ssh -o "StrictHostKeyChecking=no" -o "PasswordAuthentication=no" -i "/tmp/$name.id_rsa" tidb@$ipprefix.101 "ls"
+    ! ssh -o "StrictHostKeyChecking=no" -o "PasswordAuthentication=no" -i "/tmp/$name.id_rsa" tidb@n1 "ls"
     unlink "/tmp/$name.id_rsa"
 }
diff --git a/tests/tiup-cluster/script/scale_core.sh b/tests/tiup-cluster/script/scale_core.sh
index f18dfaef6c..d3f18f7088 100755
--- a/tests/tiup-cluster/script/scale_core.sh
+++ b/tests/tiup-cluster/script/scale_core.sh
@@ -6,7 +6,6 @@ function scale_core() {
     version=$1
     test_tls=$2
     native_ssh=$3
-    ipprefix=${TIUP_TEST_IP_PREFIX:-"172.19.0"}

     client=""
     if [ $native_ssh == true ]; then
@@ -19,7 +18,6 @@ function scale_core() {
     else
         topo=./topo/full.yaml
     fi
-    sed "s/__IPPREFIX__/$ipprefix/g" $topo.tpl > $topo

     tiup-cluster $client --yes deploy $name $version $topo -i ~/.ssh/id_rsa

@@ -40,55 +38,51 @@ function scale_core() {
     fi

     echo "start scale in tidb"
-    tiup-cluster $client --yes scale-in $name -N $ipprefix.101:4000
+    tiup-cluster $client --yes scale-in $name -N n1:4000
     wait_instance_num_reach $name $total_sub_one $native_ssh

     echo "start scale out tidb"
     topo=./topo/full_scale_in_tidb.yaml
-    sed "s/__IPPREFIX__/$ipprefix/g" $topo.tpl > $topo
     tiup-cluster $client --yes scale-out $name $topo

     # after scale-out, ensure the service is enabled
-    tiup-cluster $client exec $name -N $ipprefix.101 --command "systemctl status tidb-4000 | grep Loaded |grep 'enabled; vendor'"
+    tiup-cluster $client exec $name -N n1 --command "systemctl status tidb-4000 | grep Loaded |grep 'enabled; vendor'"

     # echo "start scale in tikv"
-    # tiup-cluster --yes scale-in $name -N $ipprefix.103:20160
+    # tiup-cluster --yes scale-in $name -N n3:20160
     # wait_instance_num_reach $name $total_sub_one $native_ssh
     # echo "start scale out tikv"
     # topo=./topo/full_scale_in_tikv.yaml
-    # sed "s/__IPPREFIX__/$ipprefix/g" $topo.tpl > $topo
     # tiup-cluster --yes scale-out $name $topo

     echo "start scale in pd"
-    tiup-cluster $client --yes scale-in $name -N $ipprefix.103:2379
+    tiup-cluster $client --yes scale-in $name -N n3:2379
     wait_instance_num_reach $name $total_sub_one $native_ssh

     # validate https://github.com/pingcap/tiup/issues/786
     # ensure that this instance is removed from the startup scripts of other components that rely on PD
-    ! tiup-cluster $client exec $name -N $ipprefix.101 --command "grep -q $ipprefix.103:2379 /home/tidb/deploy/tidb-4000/scripts/run_tidb.sh"
+    ! tiup-cluster $client exec $name -N n1 --command "grep -q n3:2379 /home/tidb/deploy/tidb-4000/scripts/run_tidb.sh"

     echo "start scale out pd"
     topo=./topo/full_scale_in_pd.yaml
-    sed "s/__IPPREFIX__/$ipprefix/g" $topo.tpl > $topo
     tiup-cluster $client --yes scale-out $name $topo

     # after scale-out, ensure this instance comes back
-    tiup-cluster $client exec $name -N $ipprefix.101 --command "grep -q $ipprefix.103:2379 /home/tidb/deploy/tidb-4000/scripts/run_tidb.sh"
+    tiup-cluster $client exec $name -N n1 --command "grep -q n3:2379 /home/tidb/deploy/tidb-4000/scripts/run_tidb.sh"

     echo "start scale in tidb"
-    tiup-cluster $client --yes scale-in $name -N $ipprefix.102:4000
+    tiup-cluster $client --yes scale-in $name -N n2:4000
     wait_instance_num_reach $name $total_sub_one $native_ssh

-    ! tiup-cluster $client exec $name -N $ipprefix.102 --command "ls /home/tidb/deploy/monitor-9100/deploy/monitor-9100"
-    ! tiup-cluster $client exec $name -N $ipprefix.102 --command "ps aux | grep node_exporter | grep -qv grep"
-    ! tiup-cluster $client exec $name -N $ipprefix.102 --command "ps aux | grep blackbox_exporter | grep -qv grep"
+    ! tiup-cluster $client exec $name -N n2 --command "ls /home/tidb/deploy/monitor-9100/deploy/monitor-9100"
+    ! tiup-cluster $client exec $name -N n2 --command "ps aux | grep node_exporter | grep -qv grep"
+    ! tiup-cluster $client exec $name -N n2 --command "ps aux | grep blackbox_exporter | grep -qv grep"

     # after all components on the node are scaled in, the SSH public key is automatically deleted
-    ! ssh -o "StrictHostKeyChecking=no "-o "PasswordAuthentication=no" -i ~/.tiup/storage/cluster/$name/ssh/id_rsa tidb@$ipprefix.102 "ls"
+    ! ssh -o "StrictHostKeyChecking=no" -o "PasswordAuthentication=no" -i ~/.tiup/storage/cluster/$name/ssh/id_rsa tidb@n2 "ls"

     echo "start scale out tidb"
-    topo=./topo/full_scale_in_tidb.yaml
-    sed "s/__IPPREFIX__.101/$ipprefix.102/g" $topo.tpl > $topo
+    topo=./topo/full_scale_in_tidb_2nd.yaml
     tiup-cluster $client --yes scale-out $name $topo

     # after scale-out, ensure node_exporter and blackbox_exporter come back
-    tiup-cluster $client exec $name -N $ipprefix.102 --command "ls /home/tidb/deploy/monitor-9100/deploy/monitor-9100"
-    tiup-cluster $client exec $name -N $ipprefix.102 --command "ps aux | grep node_exporter | grep -qv grep"
-    tiup-cluster $client exec $name -N $ipprefix.102 --command "ps aux | grep blackbox_exporter | grep -qv grep"
+    tiup-cluster $client exec $name -N n2 --command "ls /home/tidb/deploy/monitor-9100/deploy/monitor-9100"
+    tiup-cluster $client exec $name -N n2 --command "ps aux | grep node_exporter | grep -qv grep"
+    tiup-cluster $client exec $name -N n2 --command "ps aux | grep blackbox_exporter | grep -qv grep"

     tiup-cluster $client _test $name writable
 }
diff --git a/tests/tiup-cluster/script/scale_tools.sh b/tests/tiup-cluster/script/scale_tools.sh
index 844fe16e5a..32e7b05d32 100755
--- a/tests/tiup-cluster/script/scale_tools.sh
+++ b/tests/tiup-cluster/script/scale_tools.sh
@@ -6,7 +6,6 @@ function scale_tools() {
     version=$1
     test_tls=$2
     native_ssh=$3
-    ipprefix=${TIUP_TEST_IP_PREFIX:-"172.19.0"}

     client=""
     if [ $native_ssh == true ]; then
@@ -19,14 +18,13 @@ function scale_tools() {
     else
         topo=./topo/full.yaml
     fi
-    sed "s/__IPPREFIX__/$ipprefix/g" $topo.tpl > $topo

     tiup-cluster $client --yes deploy $name $version $topo -i ~/.ssh/id_rsa

     # check the local config
-    tiup-cluster $client exec $name -N $ipprefix.101 --command "grep magic-string-for-test /home/tidb/deploy/prometheus-9090/conf/tidb.rules.yml"
-    tiup-cluster $client exec $name -N $ipprefix.101 --command "grep magic-string-for-test /home/tidb/deploy/grafana-3000/dashboards/tidb.json"
-    tiup-cluster $client exec $name -N $ipprefix.101 --command "grep magic-string-for-test /home/tidb/deploy/alertmanager-9093/conf/alertmanager.yml"
+    tiup-cluster $client exec $name -N n1 --command "grep magic-string-for-test /home/tidb/deploy/prometheus-9090/conf/tidb.rules.yml"
+    tiup-cluster $client exec $name -N n1 --command "grep magic-string-for-test /home/tidb/deploy/grafana-3000/dashboards/tidb.json"
+    tiup-cluster $client exec $name -N n1 --command "grep magic-string-for-test /home/tidb/deploy/alertmanager-9093/conf/alertmanager.yml"

     tiup-cluster $client list | grep "$name"

@@ -43,59 +41,54 @@ function scale_tools() {
     fi

     echo "start scale in pump"
-    tiup-cluster $client --yes scale-in $name -N $ipprefix.103:8250
+    tiup-cluster $client --yes scale-in $name -N n3:8250
     wait_instance_num_reach $name $total_sub_one $native_ssh

     echo "start scale out pump"
     topo=./topo/full_scale_in_pump.yaml
-    sed "s/__IPPREFIX__/$ipprefix/g" $topo.tpl > $topo
     tiup-cluster $client --yes scale-out $name $topo

     echo "start scale in cdc"
-    yes | tiup-cluster $client scale-in $name -N $ipprefix.103:8300
+    yes | tiup-cluster $client scale-in $name -N n3:8300
     wait_instance_num_reach $name $total_sub_one $native_ssh

     echo "start scale out cdc"
     topo=./topo/full_scale_in_cdc.yaml
-    sed "s/__IPPREFIX__/$ipprefix/g" $topo.tpl > $topo
     yes | tiup-cluster $client scale-out $name $topo

     if [ $test_tls = false ]; then
         echo "start scale in tispark"
-        yes | tiup-cluster $client --yes scale-in $name -N $ipprefix.104:7078
+        yes | tiup-cluster $client --yes scale-in $name -N n4:7078
         wait_instance_num_reach $name $total_sub_one $native_ssh

         echo "start scale out tispark"
         topo=./topo/full_scale_in_tispark.yaml
-        sed "s/__IPPREFIX__/$ipprefix/g" $topo.tpl > $topo
         yes | tiup-cluster $client --yes scale-out $name $topo
     fi

     echo "start scale in grafana"
-    tiup-cluster $client --yes scale-in $name -N $ipprefix.101:3000
+    tiup-cluster $client --yes scale-in $name -N n1:3000
     wait_instance_num_reach $name $total_sub_one $native_ssh

     echo "start scale out grafana"
     topo=./topo/full_scale_in_grafana.yaml
-    sed "s/__IPPREFIX__/$ipprefix/g" $topo.tpl > $topo
     tiup-cluster $client --yes scale-out $name $topo

     # make sure grafana dashboards have been reset to default (since full_scale_in_grafana.yaml doesn't provide a local dashboards dir)
-    ! tiup-cluster $client exec $name -N $ipprefix.101 --command "grep magic-string-for-test /home/tidb/deploy/grafana-3000/dashboards/tidb.json"
+    ! tiup-cluster $client exec $name -N n1 --command "grep magic-string-for-test /home/tidb/deploy/grafana-3000/dashboards/tidb.json"

     # currently tiflash is not supported in a TLS-enabled cluster
     # and only TiFlash supports multi-path data-dir
     if [ $test_tls = false ]; then
         # ensure tiflash's data dir exists
-        tiup-cluster $client exec $name -N $ipprefix.103 --command "ls /home/tidb/deploy/tiflash-9000/data1"
-        tiup-cluster $client exec $name -N $ipprefix.103 --command "ls /data/tiflash-data"
+        tiup-cluster $client exec $name -N n3 --command "ls /home/tidb/deploy/tiflash-9000/data1"
+        tiup-cluster $client exec $name -N n3 --command "ls /data/tiflash-data"

         echo "start scale in tiflash"
-        tiup-cluster $client --yes scale-in $name -N $ipprefix.103:9000
+        tiup-cluster $client --yes scale-in $name -N n3:9000
         tiup-cluster $client display $name | grep Tombstone

         echo "start prune tiflash"
         yes | tiup-cluster $client prune $name
         wait_instance_num_reach $name $total_sub_one $native_ssh
-        ! tiup-cluster $client exec $name -N $ipprefix.103 --command "ls /home/tidb/deploy/tiflash-9000/data1"
-        ! tiup-cluster $client exec $name -N $ipprefix.103 --command "ls /data/tiflash-data"
+        ! tiup-cluster $client exec $name -N n3 --command "ls /home/tidb/deploy/tiflash-9000/data1"
+        ! tiup-cluster $client exec $name -N n3 --command "ls /data/tiflash-data"

         echo "start scale out tiflash"
         topo=./topo/full_scale_in_tiflash.yaml
-        sed "s/__IPPREFIX__/$ipprefix/g" $topo.tpl > $topo
         tiup-cluster $client --yes scale-out $name $topo
     fi
diff --git a/tests/tiup-cluster/test_upgrade.sh b/tests/tiup-cluster/test_upgrade.sh
index 55049a5089..7d4ff24ab8 100755
--- a/tests/tiup-cluster/test_upgrade.sh
+++ b/tests/tiup-cluster/test_upgrade.sh
@@ -6,8 +6,6 @@ version=${version-v4.0.4}
 old_version=${old_version-v3.0.16}
 name=test_upgrade
 topo=./topo/upgrade.yaml
-ipprefix=${TIUP_TEST_IP_PREFIX:-"172.19.0"}
-sed "s/__IPPREFIX__/$ipprefix/g" $topo.tpl > $topo

 mkdir -p ~/.tiup/bin && cp -f ./root.json ~/.tiup/bin/

diff --git a/tests/tiup-cluster/test_upgrade_tls.sh b/tests/tiup-cluster/test_upgrade_tls.sh
index 0995291b32..34e4472495 100755
--- a/tests/tiup-cluster/test_upgrade_tls.sh
+++ b/tests/tiup-cluster/test_upgrade_tls.sh
@@ -6,8 +6,6 @@ version=${version-v4.0.4}
 old_version=${old_version-v3.0.16}
 name=test_upgrade_tls
 topo=./topo/upgrade_tls.yaml
-ipprefix=${TIUP_TEST_IP_PREFIX:-"172.19.0"}
-sed "s/__IPPREFIX__/$ipprefix/g" $topo.tpl > $topo

 mkdir -p ~/.tiup/bin && cp -f ./root.json ~/.tiup/bin/

diff --git a/tests/tiup-cluster/topo/full.yaml.tpl b/tests/tiup-cluster/topo/full.yaml
similarity index 57%
rename from tests/tiup-cluster/topo/full.yaml.tpl
rename to tests/tiup-cluster/topo/full.yaml
index 41ef675885..6d9164aa6d 100644
--- a/tests/tiup-cluster/topo/full.yaml.tpl
+++ b/tests/tiup-cluster/topo/full.yaml
@@ -12,60 +12,60 @@ server_configs:
     storage.stop-write-at-available-space: 1 mib

 tidb_servers:
-  - host: __IPPREFIX__.101
-  - host: __IPPREFIX__.102
+  - host: n1
+  - host: n2

 pd_servers:
-  - host: __IPPREFIX__.103
-  - host: __IPPREFIX__.104
-  - host: __IPPREFIX__.105
+  - host: n3
+  - host: n4
+  - host: n5

 # Note: if there are only 3 instances, when one of them is scaled in,
 # it may not become Tombstone.
 tikv_servers:
-  - host: __IPPREFIX__.101
-  - host: __IPPREFIX__.103
+  - host: n1
+  - host: n3
     data_dir: "/home/tidb/my_kv_data"
-  - host: __IPPREFIX__.104
-  - host: __IPPREFIX__.105
+  - host: n4
+  - host: n5

 # tiflash eats too much memory
 # and its binary is more than 1G.
 tiflash_servers:
-  - host: __IPPREFIX__.103
+  - host: n3
     data_dir: "data1,/data/tiflash-data"
-# - host: __IPPREFIX__.104
-# - host: __IPPREFIX__.105
+# - host: n4
+# - host: n5

 pump_servers:
-  - host: __IPPREFIX__.103
-  - host: __IPPREFIX__.104
-  - host: __IPPREFIX__.105
+  - host: n3
+  - host: n4
+  - host: n5

 drainer_servers:
-  - host: __IPPREFIX__.101
+  - host: n1
     data_dir: /home/tidb/data/drainer-8249/data
     commit_ts: -1
     config:
       syncer.db-type: "file"

 cdc_servers:
-  - host: __IPPREFIX__.103
-  - host: __IPPREFIX__.104
-  - host: __IPPREFIX__.105
+  - host: n3
+  - host: n4
+  - host: n5

 tispark_masters:
-  - host: __IPPREFIX__.103
+  - host: n3

 tispark_workers:
-  - host: __IPPREFIX__.104
+  - host: n4

 monitoring_servers:
-  - host: __IPPREFIX__.101
+  - host: n1
     rule_dir: /tmp/local/prometheus

 grafana_servers:
-  - host: __IPPREFIX__.101
+  - host: n1
     dashboard_dir: /tmp/local/grafana

 alertmanager_servers:
-  - host: __IPPREFIX__.101
+  - host: n1
     config_file: /tmp/local/alertmanager/alertmanager.yml
diff --git a/tests/tiup-cluster/topo/full_scale_in_cdc.yaml b/tests/tiup-cluster/topo/full_scale_in_cdc.yaml
new file mode 100644
index 0000000000..f78fe562f7
--- /dev/null
+++ b/tests/tiup-cluster/topo/full_scale_in_cdc.yaml
@@ -0,0 +1,2 @@
+cdc_servers:
+  - host: n3
diff --git a/tests/tiup-cluster/topo/full_scale_in_cdc.yaml.tpl b/tests/tiup-cluster/topo/full_scale_in_cdc.yaml.tpl
deleted file mode 100644
index 0483c9d686..0000000000
--- a/tests/tiup-cluster/topo/full_scale_in_cdc.yaml.tpl
+++ /dev/null
@@ -1,2 +0,0 @@
-cdc_servers:
-  - host: __IPPREFIX__.103
diff --git a/tests/tiup-cluster/topo/full_scale_in_grafana.yaml b/tests/tiup-cluster/topo/full_scale_in_grafana.yaml
new file mode 100644
index 0000000000..4e85dd8b2d
--- /dev/null
+++ b/tests/tiup-cluster/topo/full_scale_in_grafana.yaml
@@ -0,0 +1,2 @@
+grafana_servers:
+  - host: n1
diff --git a/tests/tiup-cluster/topo/full_scale_in_grafana.yaml.tpl b/tests/tiup-cluster/topo/full_scale_in_grafana.yaml.tpl
deleted file mode 100644
index 774d3931d6..0000000000
--- a/tests/tiup-cluster/topo/full_scale_in_grafana.yaml.tpl
+++ /dev/null
@@ -1,2 +0,0 @@
-grafana_servers:
-  - host: __IPPREFIX__.101
diff --git a/tests/tiup-cluster/topo/full_scale_in_pd.yaml b/tests/tiup-cluster/topo/full_scale_in_pd.yaml
new file mode 100644
index 0000000000..1a4332531e
--- /dev/null
+++ b/tests/tiup-cluster/topo/full_scale_in_pd.yaml
@@ -0,0 +1,2 @@
+pd_servers:
+  - host: n3
diff --git a/tests/tiup-cluster/topo/full_scale_in_pd.yaml.tpl b/tests/tiup-cluster/topo/full_scale_in_pd.yaml.tpl
deleted file mode 100644
index 647789e98a..0000000000
--- a/tests/tiup-cluster/topo/full_scale_in_pd.yaml.tpl
+++ /dev/null
@@ -1,2 +0,0 @@
-pd_servers:
-  - host: __IPPREFIX__.103
diff --git a/tests/tiup-cluster/topo/full_scale_in_pump.yaml b/tests/tiup-cluster/topo/full_scale_in_pump.yaml
new file mode 100644
index 0000000000..65ad5e80cd
--- /dev/null
+++ b/tests/tiup-cluster/topo/full_scale_in_pump.yaml
@@ -0,0 +1,2 @@
+pump_servers:
+  - host: n3
diff --git a/tests/tiup-cluster/topo/full_scale_in_pump.yaml.tpl b/tests/tiup-cluster/topo/full_scale_in_pump.yaml.tpl
deleted file mode 100644
index ee293b768b..0000000000
--- a/tests/tiup-cluster/topo/full_scale_in_pump.yaml.tpl
+++ /dev/null
@@ -1,2 +0,0 @@
-pump_servers:
-  - host: __IPPREFIX__.103
diff --git a/tests/tiup-cluster/topo/full_scale_in_tidb.yaml b/tests/tiup-cluster/topo/full_scale_in_tidb.yaml
new file mode 100644
index 0000000000..1c4afcd9ae
--- /dev/null
+++ b/tests/tiup-cluster/topo/full_scale_in_tidb.yaml
@@ -0,0 +1,2 @@
+tidb_servers:
+  - host: n1
diff --git a/tests/tiup-cluster/topo/full_scale_in_tidb.yaml.tpl b/tests/tiup-cluster/topo/full_scale_in_tidb.yaml.tpl
deleted file mode 100644
index 1a2820f8dc..0000000000
--- a/tests/tiup-cluster/topo/full_scale_in_tidb.yaml.tpl
+++ /dev/null
@@ -1,2 +0,0 @@
-tidb_servers:
-  - host: __IPPREFIX__.101
diff --git a/tests/tiup-cluster/topo/full_scale_in_tidb_2nd.yaml b/tests/tiup-cluster/topo/full_scale_in_tidb_2nd.yaml
new file mode 100644
index 0000000000..224e7dbdaa
--- /dev/null
+++ b/tests/tiup-cluster/topo/full_scale_in_tidb_2nd.yaml
@@ -0,0 +1,2 @@
+tidb_servers:
+  - host: n2
diff --git a/tests/tiup-cluster/topo/full_scale_in_tiflash.yaml b/tests/tiup-cluster/topo/full_scale_in_tiflash.yaml
new file mode 100644
index 0000000000..64589f920d
--- /dev/null
+++ b/tests/tiup-cluster/topo/full_scale_in_tiflash.yaml
@@ -0,0 +1,2 @@
+tiflash_servers:
+  - host: n3
diff --git a/tests/tiup-cluster/topo/full_scale_in_tiflash.yaml.tpl b/tests/tiup-cluster/topo/full_scale_in_tiflash.yaml.tpl
deleted file mode 100644
index b4e4efc9c9..0000000000
--- a/tests/tiup-cluster/topo/full_scale_in_tiflash.yaml.tpl
+++ /dev/null
@@ -1,2 +0,0 @@
-tiflash_servers:
-  - host: __IPPREFIX__.103
diff --git a/tests/tiup-cluster/topo/full_scale_in_tikv.yaml b/tests/tiup-cluster/topo/full_scale_in_tikv.yaml
new file mode 100644
index 0000000000..7af93681e0
--- /dev/null
+++ b/tests/tiup-cluster/topo/full_scale_in_tikv.yaml
@@ -0,0 +1,2 @@
+tikv_servers:
+  - host: n3
diff --git a/tests/tiup-cluster/topo/full_scale_in_tikv.yaml.tpl b/tests/tiup-cluster/topo/full_scale_in_tikv.yaml.tpl
deleted file mode 100644
index a64cd92150..0000000000
--- a/tests/tiup-cluster/topo/full_scale_in_tikv.yaml.tpl
+++ /dev/null
@@ -1,2 +0,0 @@
-tikv_servers:
-  - host: __IPPREFIX__.103
diff --git a/tests/tiup-cluster/topo/full_scale_in_tispark.yaml b/tests/tiup-cluster/topo/full_scale_in_tispark.yaml
new file mode 100644
index 0000000000..34ba9c8b52
--- /dev/null
+++ b/tests/tiup-cluster/topo/full_scale_in_tispark.yaml
@@ -0,0 +1,2 @@
+tispark_workers:
+  - host: n5
diff --git a/tests/tiup-cluster/topo/full_scale_in_tispark.yaml.tpl b/tests/tiup-cluster/topo/full_scale_in_tispark.yaml.tpl
deleted file mode 100644
index 9598deb4aa..0000000000
--- a/tests/tiup-cluster/topo/full_scale_in_tispark.yaml.tpl
+++ /dev/null
@@ -1,2 +0,0 @@
-tispark_workers:
-  - host: __IPPREFIX__.105
diff --git a/tests/tiup-cluster/topo/full_tls.yaml.tpl b/tests/tiup-cluster/topo/full_tls.yaml
similarity index 58%
rename from tests/tiup-cluster/topo/full_tls.yaml.tpl
rename to tests/tiup-cluster/topo/full_tls.yaml
index f01d8a135d..72a7b57cf4 100644
--- a/tests/tiup-cluster/topo/full_tls.yaml.tpl
+++ b/tests/tiup-cluster/topo/full_tls.yaml
@@ -13,46 +13,46 @@ server_configs:
     storage.stop-write-at-available-space: 1 mib

 tidb_servers:
-  - host: __IPPREFIX__.101
-  - host: __IPPREFIX__.102
+  - host: n1
+  - host: n2

 pd_servers:
-  - host: __IPPREFIX__.103
-  - host: __IPPREFIX__.104
-  - host: __IPPREFIX__.105
+  - host: n3
+  - host: n4
+  - host: n5

 # Note: if there are only 3 instances, when one of them is scaled in,
 # it may not become Tombstone.
 tikv_servers:
-  - host: __IPPREFIX__.101
-  - host: __IPPREFIX__.103
+  - host: n1
+  - host: n3
     data_dir: "/home/tidb/my_kv_data"
-  - host: __IPPREFIX__.104
-  - host: __IPPREFIX__.105
+  - host: n4
+  - host: n5

 pump_servers:
-  - host: __IPPREFIX__.103
-  - host: __IPPREFIX__.104
-  - host: __IPPREFIX__.105
+  - host: n3
+  - host: n4
+  - host: n5

 drainer_servers:
-  - host: __IPPREFIX__.101
+  - host: n1
     data_dir: /home/tidb/data/drainer-8249/data
     commit_ts: -1
     config:
       syncer.db-type: "file"

 cdc_servers:
-  - host: __IPPREFIX__.103
-  - host: __IPPREFIX__.104
-  - host: __IPPREFIX__.105
+  - host: n3
+  - host: n4
+  - host: n5

 monitoring_servers:
-  - host: __IPPREFIX__.101
+  - host: n1
     rule_dir: /tmp/local/prometheus

 grafana_servers:
-  - host: __IPPREFIX__.101
+  - host: n1
     dashboard_dir: /tmp/local/grafana

 alertmanager_servers:
-  - host: __IPPREFIX__.101
+  - host: n1
     config_file: /tmp/local/alertmanager/alertmanager.yml
diff --git a/tests/tiup-cluster/topo/upgrade.yaml.tpl b/tests/tiup-cluster/topo/upgrade.yaml
similarity index 57%
rename from tests/tiup-cluster/topo/upgrade.yaml.tpl
rename to tests/tiup-cluster/topo/upgrade.yaml
index f9bb0a6166..fd7d3f60e3 100644
--- a/tests/tiup-cluster/topo/upgrade.yaml.tpl
+++ b/tests/tiup-cluster/topo/upgrade.yaml
@@ -8,47 +8,47 @@ server_configs:
     storage.stop-write-at-available-space: 1 mib

 tidb_servers:
-  - host: __IPPREFIX__.101
-  - host: __IPPREFIX__.102
+  - host: n1
+  - host: n2

 pd_servers:
-  - host: __IPPREFIX__.103
-  - host: __IPPREFIX__.104
-  - host: __IPPREFIX__.105
+  - host: n3
+  - host: n4
+  - host: n5

 # Note: if there are only 3 instances, when one of them is scaled in,
 # it may not become Tombstone.
 tikv_servers:
-  - host: __IPPREFIX__.102
-  - host: __IPPREFIX__.103
-  - host: __IPPREFIX__.104
-  - host: __IPPREFIX__.105
+  - host: n2
+  - host: n3
+  - host: n4
+  - host: n5

 # tiflash eats too much memory
 # and its binary is more than 1G.
 # tiflash_servers:
-#   - host: __IPPREFIX__.103
-#   - host: __IPPREFIX__.104
-#   - host: __IPPREFIX__.105
+#   - host: n3
+#   - host: n4
+#   - host: n5

 pump_servers:
-  - host: __IPPREFIX__.103
-  - host: __IPPREFIX__.104
-  - host: __IPPREFIX__.105
+  - host: n3
+  - host: n4
+  - host: n5

 drainer_servers:
-  - host: __IPPREFIX__.101
+  - host: n1
     data_dir: /home/tidb/data/drainer-8249/data
     commit_ts: -1
     config:
       syncer.db-type: "file"

 monitoring_servers:
-  - host: __IPPREFIX__.101
+  - host: n1
     rule_dir: /tmp/local/prometheus

 grafana_servers:
-  - host: __IPPREFIX__.101
+  - host: n1
     dashboard_dir: /tmp/local/grafana

 alertmanager_servers:
-  - host: __IPPREFIX__.101
+  - host: n1
     config_file: /tmp/local/alertmanager/alertmanager.yml
diff --git a/tests/tiup-cluster/topo/upgrade_tls.yaml.tpl b/tests/tiup-cluster/topo/upgrade_tls.yaml
similarity index 54%
rename from tests/tiup-cluster/topo/upgrade_tls.yaml.tpl
rename to tests/tiup-cluster/topo/upgrade_tls.yaml
index f4fc4eec53..ff2d16f224 100644
--- a/tests/tiup-cluster/topo/upgrade_tls.yaml.tpl
+++ b/tests/tiup-cluster/topo/upgrade_tls.yaml
@@ -11,37 +11,37 @@ server_configs:
     storage.stop-write-at-available-space: 1 mib

 tidb_servers:
-  - host: __IPPREFIX__.101
-  - host: __IPPREFIX__.102
+  - host: n1
+  - host: n2

 pd_servers:
-  - host: __IPPREFIX__.103
-  - host: __IPPREFIX__.104
-  - host: __IPPREFIX__.105
+  - host: n3
+  - host: n4
+  - host: n5

 # Note: if there are only 3 instances, when one of them is scaled in,
 # it may not become Tombstone.
 tikv_servers:
-  - host: __IPPREFIX__.102
-  - host: __IPPREFIX__.103
-  - host: __IPPREFIX__.104
-  - host: __IPPREFIX__.105
+  - host: n2
+  - host: n3
+  - host: n4
+  - host: n5

 pump_servers:
-  - host: __IPPREFIX__.103
-  - host: __IPPREFIX__.104
-  - host: __IPPREFIX__.105
+  - host: n3
+  - host: n4
+  - host: n5

 drainer_servers:
-  - host: __IPPREFIX__.101
+  - host: n1
     data_dir: /home/tidb/data/drainer-8249/data
     commit_ts: -1
     config:
       syncer.db-type: "file"

 monitoring_servers:
-  - host: __IPPREFIX__.101
+  - host: n1

 grafana_servers:
-  - host: __IPPREFIX__.101
+  - host: n1

 alertmanager_servers:
-  - host: __IPPREFIX__.101
+  - host: n1