Skip to content

Commit

Permalink
Merge branch 'master' into master
Browse files Browse the repository at this point in the history
  • Loading branch information
xhebox authored Jul 14, 2023
2 parents fccbe63 + 7b36b25 commit 65b8f51
Show file tree
Hide file tree
Showing 39 changed files with 325 additions and 139 deletions.
21 changes: 21 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,26 @@
TiUP Changelog

## [1.12.4] 2023-7-13

### Fixes

- Fix cannot show tiflash uptime in `tiup-cluster` (#2227, @nexustar)

### Improvement

- Remove tcp_port for tiflash in `tiup-cluster` and `tiup-playground` (#2220, @zanmato1984)

## [1.12.3] 2023-6-14

### Fixes

- Fix cannot edit manage_host on an existing cluster in `tiup-cluster` (#2210, @nexustar)
- Fix still use host instead of manage_host in `tiup-cluster` (#2206 #2207, @nexustar)

### Improvement

- Check if the component exists when uninstall in `tiup` (#2209, @srstack)

## [1.12.2] 2023-5-19

### Notes
Expand Down
10 changes: 9 additions & 1 deletion cmd/uninstall.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ which is used to uninstall tiup.
teleCommand = cmd.CommandPath()
env := environment.GlobalEnv()
if self {
deletable := []string{"bin", "manifest", "manifests", "components", "storage/cluster/packages"}
deletable := []string{"storage/cluster/packages", "components", "manifests", "manifest", "bin"}
for _, dir := range deletable {
if err := os.RemoveAll(env.Profile().Path(dir)); err != nil {
return errors.Trace(err)
Expand Down Expand Up @@ -86,6 +86,9 @@ func removeComponents(env *environment.Environment, specs []string, all bool) er
if strings.Contains(spec, ":") {
parts := strings.SplitN(spec, ":", 2)
// after this version is deleted, component will have no version left. delete the whole component dir directly
if !utils.IsExist(env.LocalPath(localdata.ComponentParentDir, parts[0])) {
return errors.Trace(fmt.Errorf("component `%s` is not installed, please use `tiup list %s` to check", parts[0], parts[0]))
}
dir, err := os.ReadDir(env.LocalPath(localdata.ComponentParentDir, parts[0]))
if err != nil {
return errors.Trace(err)
Expand All @@ -99,6 +102,7 @@ func removeComponents(env *environment.Environment, specs []string, all bool) er
} else {
paths = append(paths, env.LocalPath(localdata.ComponentParentDir, parts[0], parts[1]))
}
// if no more version left, delete the whole component dir
if len(dir)-len(paths) < 1 {
paths = append(paths, env.LocalPath(localdata.ComponentParentDir, parts[0]))
}
Expand All @@ -110,6 +114,10 @@ func removeComponents(env *environment.Environment, specs []string, all bool) er
paths = append(paths, env.LocalPath(localdata.ComponentParentDir, spec))
}
for _, path := range paths {
if !utils.IsExist(path) {
return errors.Trace(fmt.Errorf("component `%s` is not installed, please check", spec))
}
fmt.Println(path)
if err := os.RemoveAll(path); err != nil {
return errors.Trace(err)
}
Expand Down
2 changes: 1 addition & 1 deletion components/dm/command/prune.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ func clearOutDatedEtcdInfo(clusterName string, metadata *spec.Metadata, opt oper
if err != nil {
return err
}
dmMasterClient := api.NewDMMasterClient(topo.GetMasterList(), 10*time.Second, tlsCfg)
dmMasterClient := api.NewDMMasterClient(topo.GetMasterListWithManageHost(), 10*time.Second, tlsCfg)
registeredMasters, registeredWorkers, err := dmMasterClient.GetRegisteredMembers()
if err != nil {
return err
Expand Down
3 changes: 2 additions & 1 deletion components/dm/command/scale_in.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ import (
"github.com/pingcap/tiup/pkg/cluster/spec"
"github.com/pingcap/tiup/pkg/cluster/task"
"github.com/pingcap/tiup/pkg/set"
"github.com/pingcap/tiup/pkg/utils"
"github.com/spf13/cobra"
)

Expand Down Expand Up @@ -126,7 +127,7 @@ func ScaleInDMCluster(
var dmMasterEndpoint []string
for _, instance := range (&dm.DMMasterComponent{Topology: topo}).Instances() {
if !deletedNodes.Exist(instance.ID()) {
dmMasterEndpoint = append(dmMasterEndpoint, operator.Addr(instance))
dmMasterEndpoint = append(dmMasterEndpoint, utils.JoinHostPort(instance.GetManageHost(), instance.GetPort()))
}
}

Expand Down
16 changes: 10 additions & 6 deletions components/dm/spec/topology_dm.go
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ func AllDMComponentNames() (roles []string) {
// MasterSpec represents the Master topology specification in topology.yaml
type MasterSpec struct {
Host string `yaml:"host"`
ManageHost string `yaml:"manage_host,omitempty"`
ManageHost string `yaml:"manage_host,omitempty" validate:"manage_host:editable"`
SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"`
Imported bool `yaml:"imported,omitempty"`
Patched bool `yaml:"patched,omitempty"`
Expand Down Expand Up @@ -205,7 +205,7 @@ func (s *MasterSpec) GetAdvertisePeerURL(enableTLS bool) string {
// WorkerSpec represents the Master topology specification in topology.yaml
type WorkerSpec struct {
Host string `yaml:"host"`
ManageHost string `yaml:"manage_host,omitempty"`
ManageHost string `yaml:"manage_host,omitempty" validate:"manage_host:editable"`
SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"`
Imported bool `yaml:"imported,omitempty"`
Patched bool `yaml:"patched,omitempty"`
Expand Down Expand Up @@ -675,7 +675,7 @@ func (s *Specification) BaseTopo() *spec.BaseTopo {
return &spec.BaseTopo{
GlobalOptions: &s.GlobalOptions,
MonitoredOptions: s.GetMonitoredOptions(),
MasterList: s.GetMasterList(),
MasterList: s.GetMasterListWithManageHost(),
Monitors: s.Monitors,
Grafanas: s.Grafanas,
Alertmanagers: s.Alertmanagers,
Expand All @@ -701,12 +701,16 @@ func (s *Specification) MergeTopo(rhs spec.Topology) spec.Topology {
return s.Merge(other)
}

// GetMasterList returns a list of Master API hosts of the current cluster
func (s *Specification) GetMasterList() []string {
// GetMasterListWithManageHost returns a list of Master API hosts of the current cluster
func (s *Specification) GetMasterListWithManageHost() []string {
var masterList []string

for _, master := range s.Masters {
masterList = append(masterList, utils.JoinHostPort(master.Host, master.Port))
host := master.Host
if master.ManageHost != "" {
host = master.ManageHost
}
masterList = append(masterList, utils.JoinHostPort(host, master.Port))
}

return masterList
Expand Down
1 change: 0 additions & 1 deletion components/playground/instance/tiflash.go
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,6 @@ func (inst *TiFlashInstance) Start(ctx context.Context, version utils.Version) e
fmt.Sprintf("--tmp_path=%s", filepath.Join(inst.Dir, "tmp")),
fmt.Sprintf("--path=%s", filepath.Join(inst.Dir, "data")),
fmt.Sprintf("--listen_host=%s", inst.Host),
fmt.Sprintf("--tcp_port=%d", inst.TCPPort),
fmt.Sprintf("--logger.log=%s", inst.LogFile()),
fmt.Sprintf("--logger.errorlog=%s", filepath.Join(inst.Dir, "tiflash_error.log")),
fmt.Sprintf("--status.metrics_port=%d", inst.StatusPort),
Expand Down
8 changes: 4 additions & 4 deletions components/playground/instance/tiflash_pre7_config.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,10 +34,10 @@ const tiflashMarkCacheSizeOld = `mark_cache_size = 5368709120`
const tiflashConfigOld = `
default_profile = "default"
display_name = "TiFlash"
%[2]s
http_port = %[2]d
listen_host = "0.0.0.0"
path = "%[5]s"
tcp_port = %[3]d
path = "%[5]s"
tmp_path = "%[6]s"
%[14]s
%[13]s
Expand Down Expand Up @@ -109,11 +109,11 @@ func writeTiFlashConfigOld(w io.Writer, version utils.Version, tcpPort, httpPort
var conf string

if tidbver.TiFlashNotNeedSomeConfig(version.String()) {
conf = fmt.Sprintf(tiflashConfigOld, pdAddrs, fmt.Sprintf(`http_port = %d`, httpPort), tcpPort,
conf = fmt.Sprintf(tiflashConfigOld, pdAddrs, httpPort, tcpPort,
deployDir, dataDir, tmpDir, logDir, servicePort, metricsPort,
ip, strings.Join(tidbStatusAddrs, ","), clusterManagerPath, "", "")
} else {
conf = fmt.Sprintf(tiflashConfigOld, pdAddrs, fmt.Sprintf(`http_port = %d`, httpPort), tcpPort,
conf = fmt.Sprintf(tiflashConfigOld, pdAddrs, httpPort, tcpPort,
deployDir, dataDir, tmpDir, logDir, servicePort, metricsPort,
ip, strings.Join(tidbStatusAddrs, ","), clusterManagerPath, tiflashDaemonConfigOld, tiflashMarkCacheSizeOld)
}
Expand Down
2 changes: 1 addition & 1 deletion components/playground/playground.go
Original file line number Diff line number Diff line change
Expand Up @@ -913,7 +913,7 @@ func (p *Playground) bootCluster(ctx context.Context, env *environment.Environme
return fmt.Errorf("TiDB cluster doesn't support disaggregated mode in version %s", options.Version)
}
if !tidbver.TiFlashPlaygroundNewStartMode(options.Version) {
// For simplicitly, currently we only implemented disagg mode when TiFlash can run without config.
// For simplicity, currently we only implemented disagg mode when TiFlash can run without config.
return fmt.Errorf("TiUP playground only supports disaggregated mode for TiDB cluster >= v7.1.0 (or nightly)")
}

Expand Down
1 change: 1 addition & 0 deletions embed/examples/cluster/minimal.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -182,6 +182,7 @@ tiflash_servers:
# # SSH port of the server.
# ssh_port: 22
# # TiFlash TCP Service port.
# # Since 7.1.0, this port is not actually listened on; it is only used as part of the instance identity.
# tcp_port: 9000
# # TiFlash raft service and coprocessor service listening address.
# flash_service_port: 3930
Expand Down
1 change: 1 addition & 0 deletions embed/examples/cluster/multi-dc.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -235,6 +235,7 @@ tiflash_servers:
# # SSH port of the server.
# ssh_port: 22
# # TiFlash TCP Service port.
# # Since 7.1.0, this port is not actually listened on; it is only used as part of the instance identity.
# tcp_port: 9000
# # TiFlash raft service and coprocessor service listening address.
# flash_service_port: 3930
Expand Down
1 change: 1 addition & 0 deletions embed/examples/cluster/topology.example.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -224,6 +224,7 @@ tiflash_servers:
# # SSH port of the server.
# ssh_port: 22
# # TiFlash TCP Service port.
# # Since 7.1.0, this port is not actually listened on; it is only used as part of the instance identity.
tcp_port: 9000
# # TiFlash raft service and coprocessor service listening address.
flash_service_port: 3930
Expand Down
1 change: 0 additions & 1 deletion pkg/cluster/ansible/service_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ default_profile = "default"
display_name = "TiFlash"
listen_host = "0.0.0.0"
path = "/data1/test-cluster/leiysky-ansible-test-deploy/tiflash/data/db"
tcp_port = 11315
tmp_path = "/data1/test-cluster/leiysky-ansible-test-deploy/tiflash/data/db/tmp"
[flash]
Expand Down
2 changes: 1 addition & 1 deletion pkg/cluster/manager/check.go
Original file line number Diff line number Diff line change
Expand Up @@ -695,7 +695,7 @@ func (m *Manager) checkRegionsInfo(clusterName string, topo *spec.Specification,
}
pdClient := api.NewPDClient(
context.WithValue(context.TODO(), logprinter.ContextKeyLogger, m.logger),
topo.GetPDList(),
topo.GetPDListWithManageHost(),
time.Second*time.Duration(gOpt.APITimeout),
tlsConfig,
)
Expand Down
46 changes: 5 additions & 41 deletions pkg/cluster/manager/display.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,6 @@ import (
"github.com/pingcap/tiup/pkg/set"
"github.com/pingcap/tiup/pkg/tui"
"github.com/pingcap/tiup/pkg/utils"
"go.uber.org/zap"
)

// DisplayOption represents option of display command
Expand Down Expand Up @@ -233,7 +232,7 @@ func (m *Manager) Display(dopt DisplayOption, opt operator.Options) error {
continue
}
if strings.HasPrefix(v.Status, "Up") || strings.HasPrefix(v.Status, "Healthy") {
instAddr := utils.JoinHostPort(v.Host, v.Port)
instAddr := utils.JoinHostPort(v.ManageHost, v.Port)
masterActive = append(masterActive, instAddr)
}
}
Expand Down Expand Up @@ -610,8 +609,9 @@ func (m *Manager) GetClusterTopology(dopt DisplayOption, opt operator.Options) (
e, found := ctxt.GetInner(ctx).GetExecutor(ins.GetManageHost())
if found {
var active string
var systemdSince time.Duration
nctx := checkpoint.NewContext(ctx)
active, memory, _ = operator.GetServiceStatus(nctx, e, ins.ServiceName())
active, memory, systemdSince, _ = operator.GetServiceStatus(nctx, e, ins.ServiceName())
if status == "-" {
if active == "active" {
status = "Up"
Expand All @@ -620,7 +620,7 @@ func (m *Manager) GetClusterTopology(dopt DisplayOption, opt operator.Options) (
}
}
if dopt.ShowUptime && since == "-" {
since = formatInstanceSince(parseSystemctlSince(active))
since = formatInstanceSince(systemdSince)
}
}
}
Expand Down Expand Up @@ -733,37 +733,6 @@ func formatInstanceSince(uptime time.Duration) string {
return strings.Join(parts, "")
}

// parseSystemctlSince extracts the service uptime from the "Active:" line of
// `systemctl status xxx.service` output, which looks like:
//
//	Active: active (running) since Sat 2021-03-27 10:51:11 CST; 41min ago
//
// It returns the duration elapsed since the reported start time, or 0 when
// the input is empty or does not match the expected format.
func parseSystemctlSince(str string) (dur time.Duration) {
	// if service is not found or other error, don't need to parse it
	if str == "" {
		return 0
	}
	// Every malformed input falls through below with dur == 0; log it once
	// here so the silent parse failure is at least visible in the debug log.
	defer func() {
		if dur == 0 {
			zap.L().Warn("failed to parse systemctl since", zap.String("value", str))
		}
	}()
	// Split off the trailing "; 41min ago" fragment; a well-formed line has
	// exactly one ";" separator.
	parts := strings.Split(str, ";")
	if len(parts) != 2 {
		return
	}
	// Split the leading fragment on spaces; the last three fields are the
	// date, the time and the timezone (e.g. "2021-03-27 10:51:11 CST").
	parts = strings.Split(parts[0], " ")
	if len(parts) < 3 {
		return
	}

	dateStr := strings.Join(parts[len(parts)-3:], " ")

	// Reference layout matching the "2021-03-27 10:51:11 CST" shape above.
	tm, err := time.Parse("2006-01-02 15:04:05 MST", dateStr)
	if err != nil {
		return
	}

	return time.Since(tm)
}

// SetSSHKeySet set ssh key set.
func SetSSHKeySet(ctx context.Context, privateKeyPath string, publicKeyPath string) error {
ctxt.GetInner(ctx).PrivateKeyPath = privateKeyPath
Expand Down Expand Up @@ -809,13 +778,8 @@ func (m *Manager) DisplayDashboardInfo(clusterName string, timeout time.Duration
return err
}

pdEndpoints := make([]string, 0)
for _, pd := range metadata.Topology.PDServers {
pdEndpoints = append(pdEndpoints, utils.JoinHostPort(pd.Host, pd.ClientPort))
}

ctx := context.WithValue(context.Background(), logprinter.ContextKeyLogger, m.logger)
pdAPI := api.NewPDClient(ctx, pdEndpoints, timeout, tlsCfg)
pdAPI := api.NewPDClient(ctx, metadata.Topology.GetPDListWithManageHost(), timeout, tlsCfg)
dashboardAddr, err := pdAPI.GetDashboardAddress()
if err != nil {
return fmt.Errorf("failed to retrieve TiDB Dashboard instance from PD: %s", err)
Expand Down
2 changes: 1 addition & 1 deletion pkg/cluster/operation/check.go
Original file line number Diff line number Diff line change
Expand Up @@ -520,7 +520,7 @@ func CheckServices(ctx context.Context, e ctxt.Executor, host, service string, d
return result
}

active, _, err := GetServiceStatus(ctx, e, service+".service")
active, _, _, err := GetServiceStatus(ctx, e, service+".service")
if err != nil {
result.Err = err
}
Expand Down
2 changes: 1 addition & 1 deletion pkg/cluster/operation/destroy.go
Original file line number Diff line number Diff line change
Expand Up @@ -497,7 +497,7 @@ func DestroyClusterTombstone(
pdEndpoints = strings.Split(forcePDEndpoints, ",")
logger.Warnf("%s is set, using %s as PD endpoints", EnvNamePDEndpointOverwrite, pdEndpoints)
} else {
pdEndpoints = cluster.GetPDList()
pdEndpoints = cluster.GetPDListWithManageHost()
}

var pdClient = api.NewPDClient(ctx, pdEndpoints, 10*time.Second, tlsCfg)
Expand Down
2 changes: 1 addition & 1 deletion pkg/cluster/operation/scale_in.go
Original file line number Diff line number Diff line change
Expand Up @@ -452,7 +452,7 @@ func scaleInCDC(
deferInstances := make([]spec.Instance, 0, 1)
for _, instance := range instances {
address := instance.(*spec.CDCInstance).GetAddr()
client := api.NewCDCOpenAPIClient(ctx, []string{address}, 5*time.Second, tlsCfg)
client := api.NewCDCOpenAPIClient(ctx, []string{utils.JoinHostPort(instance.GetManageHost(), instance.GetPort())}, 5*time.Second, tlsCfg)

capture, err := client.GetCaptureByAddr(address)
if err != nil {
Expand Down
Loading

0 comments on commit 65b8f51

Please sign in to comment.