Refine cluster list and display commands (#1139)
* refine ListCluster method, extract GetClusterList

* refine Display method, extract GetClusterTopology

* refine

Co-authored-by: Ti Chi Robot <71242396+ti-chi-bot@users.noreply.github.com>
baurine and ti-chi-bot authored Feb 19, 2021
1 parent b769ff8 commit 30b7746
Showing 2 changed files with 158 additions and 62 deletions.
170 changes: 118 additions & 52 deletions pkg/cluster/manager/display.go
@@ -35,14 +35,29 @@ import (
"github.com/pingcap/tiup/pkg/utils"
)

// InstInfo represents an instance info
type InstInfo struct {
ID string `json:"id"`
Role string `json:"role"`
Host string `json:"host"`
Ports string `json:"ports"`
OsArch string `json:"os_arch"`
Status string `json:"status"`
DataDir string `json:"data_dir"`
DeployDir string `json:"deploy_dir"`

ComponentName string
Port int
}

// Display cluster meta and topology.
func (m *Manager) Display(name string, opt operator.Options) error {
metadata, err := m.meta(name)
if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) &&
!errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) {
clusterInstInfos, err := m.GetClusterTopology(name, opt)
if err != nil {
return err
}

metadata, _ := m.meta(name)
topo := metadata.GetTopology()
base := metadata.GetBaseMeta()
// display cluster meta
@@ -71,24 +86,97 @@ func (m *Manager) Display(name string, opt operator.Options) error {
// Header
{"ID", "Role", "Host", "Ports", "OS/Arch", "Status", "Data Dir", "Deploy Dir"},
}
masterActive := make([]string, 0)
for _, v := range clusterInstInfos {
clusterTable = append(clusterTable, []string{
color.CyanString(v.ID),
v.Role,
v.Host,
v.Ports,
v.OsArch,
formatInstanceStatus(v.Status),
v.DataDir,
v.DeployDir,
})

if v.ComponentName != spec.ComponentPD && v.ComponentName != spec.ComponentDMMaster {
continue
}
if strings.HasPrefix(v.Status, "Up") || strings.HasPrefix(v.Status, "Healthy") {
instAddr := fmt.Sprintf("%s:%d", v.Host, v.Port)
masterActive = append(masterActive, instAddr)
}
}

tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir))
if err != nil {
return err
}

var dashboardAddr string
if t, ok := topo.(*spec.Specification); ok {
var err error
dashboardAddr, err = t.GetDashboardAddress(tlsCfg, masterActive...)
if err == nil && !set.NewStringSet("", "auto", "none").Exist(dashboardAddr) {
schema := "http"
if tlsCfg != nil {
schema = "https"
}
fmt.Printf("Dashboard URL: %s\n", cyan.Sprintf("%s://%s/dashboard", schema, dashboardAddr))
}
}

cliutil.PrintTable(clusterTable, true)
fmt.Printf("Total nodes: %d\n", len(clusterTable)-1)

ctx := ctxt.New(context.Background())
if t, ok := topo.(*spec.Specification); ok {
// Check if TiKV's labels are set correctly
pdClient := api.NewPDClient(masterActive, 10*time.Second, tlsCfg)
if lbs, err := pdClient.GetLocationLabels(); err != nil {
log.Debugf("get location labels from pd failed: %v", err)
} else if err := spec.CheckTiKVLabels(lbs, pdClient); err != nil {
color.Yellow("\nWARN: there is something wrong with TiKV labels, which may cause data loss:\n%v", err)
}

// Check if there is some instance in tombstone state
nodes, _ := operator.DestroyTombstone(ctx, t, true /* returnNodesOnly */, opt, tlsCfg)
if len(nodes) != 0 {
color.Green("There are some nodes that can be pruned: \n\tNodes: %+v\n\tYou can destroy them with the command: `tiup cluster prune %s`", nodes, name)
}
}

return nil
}

// GetClusterTopology gets the topology of the cluster.
func (m *Manager) GetClusterTopology(name string, opt operator.Options) ([]InstInfo, error) {
ctx := ctxt.New(context.Background())
metadata, err := m.meta(name)
if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) &&
!errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) {
return nil, err
}

topo := metadata.GetTopology()
base := metadata.GetBaseMeta()

err = SetSSHKeySet(ctx, m.specManager.Path(name, "ssh", "id_rsa"), m.specManager.Path(name, "ssh", "id_rsa.pub"))
if err != nil {
return err
return nil, err
}

err = SetClusterSSH(ctx, topo, base.User, opt.SSHTimeout, opt.SSHType, topo.BaseTopo().GlobalOptions.SSHType)
if err != nil {
return err
return nil, err
}

filterRoles := set.NewStringSet(opt.Roles...)
filterNodes := set.NewStringSet(opt.Nodes...)
masterList := topo.BaseTopo().MasterList
tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir))
if err != nil {
return err
return nil, err
}

masterActive := make([]string, 0)
@@ -108,17 +196,11 @@ func (m *Manager) Display(name string, opt operator.Options) error {

var dashboardAddr string
if t, ok := topo.(*spec.Specification); ok {
var err error
dashboardAddr, err = t.GetDashboardAddress(tlsCfg, masterActive...)
if err == nil && !set.NewStringSet("", "auto", "none").Exist(dashboardAddr) {
schema := "http"
if tlsCfg != nil {
schema = "https"
}
fmt.Printf("Dashboard URL: %s\n", cyan.Sprintf("%s://%s/dashboard", schema, dashboardAddr))
}
dashboardAddr, _ = t.GetDashboardAddress(tlsCfg, masterActive...)
}

clusterInstInfos := []InstInfo{}

topo.IterInstance(func(ins spec.Instance) {
// apply role filter
if len(filterRoles) > 0 && !filterRoles.Exist(ins.Role()) {
@@ -164,50 +246,34 @@ func (m *Manager) Display(name string, opt operator.Options) error {
}
}
}
clusterTable = append(clusterTable, []string{
color.CyanString(ins.ID()),
ins.Role(),
ins.GetHost(),
utils.JoinInt(ins.UsedPorts(), "/"),
cliutil.OsArch(ins.OS(), ins.Arch()),
formatInstanceStatus(status),
dataDir,
deployDir,

clusterInstInfos = append(clusterInstInfos, InstInfo{
ID: ins.ID(),
Role: ins.Role(),
Host: ins.GetHost(),
Ports: utils.JoinInt(ins.UsedPorts(), "/"),
OsArch: cliutil.OsArch(ins.OS(), ins.Arch()),
Status: status,
DataDir: dataDir,
DeployDir: deployDir,
ComponentName: ins.ComponentName(),
Port: ins.GetPort(),
})
})

// Sort by role, host, ports
sort.Slice(clusterTable[1:], func(i, j int) bool {
lhs, rhs := clusterTable[i+1], clusterTable[j+1]
// column: 1 => role, 2 => host, 3 => ports
for _, col := range []int{1, 2} {
if lhs[col] != rhs[col] {
return lhs[col] < rhs[col]
}
sort.Slice(clusterInstInfos, func(i, j int) bool {
lhs, rhs := clusterInstInfos[i], clusterInstInfos[j]
if lhs.Role != rhs.Role {
return lhs.Role < rhs.Role
}
return lhs[3] < rhs[3]
})

cliutil.PrintTable(clusterTable, true)
fmt.Printf("Total nodes: %d\n", len(clusterTable)-1)

if t, ok := topo.(*spec.Specification); ok {
// Check if TiKV's labels are set correctly
pdClient := api.NewPDClient(masterActive, 10*time.Second, tlsCfg)
if lbs, err := pdClient.GetLocationLabels(); err != nil {
log.Debugf("get location labels from pd failed: %v", err)
} else if err := spec.CheckTiKVLabels(lbs, pdClient); err != nil {
color.Yellow("\nWARN: there is something wrong with TiKV labels, which may cause data loss:\n%v", err)
}

// Check if there is some instance in tombstone state
nodes, _ := operator.DestroyTombstone(ctx, t, true /* returnNodesOnly */, opt, tlsCfg)
if len(nodes) != 0 {
color.Green("There are some nodes that can be pruned: \n\tNodes: %+v\n\tYou can destroy them with the command: `tiup cluster prune %s`", nodes, name)
if lhs.Host != rhs.Host {
return lhs.Host < rhs.Host
}
}
return lhs.Ports < rhs.Ports
})

return nil
return clusterInstInfos, nil
}

func formatInstanceStatus(status string) string {
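For context, a minimal sketch of the kind of reuse this extraction enables: a caller other than Display consuming the newly extracted GetClusterTopology, for example to emit the instance list as JSON instead of a rendered table. It assumes an already-constructed *manager.Manager (construction omitted) and a zero-value operator.Options; the helper name dumpTopologyJSON is hypothetical and not part of this commit.

```go
package example

import (
	"encoding/json"
	"fmt"

	"github.com/pingcap/tiup/pkg/cluster/manager"
	"github.com/pingcap/tiup/pkg/cluster/operator"
)

// dumpTopologyJSON is a hypothetical caller of the extracted
// GetClusterTopology: it marshals the returned []InstInfo to JSON
// instead of rendering the table that Display prints.
func dumpTopologyJSON(m *manager.Manager, name string) error {
	instances, err := m.GetClusterTopology(name, operator.Options{})
	if err != nil {
		return err
	}
	out, err := json.MarshalIndent(instances, "", "  ")
	if err != nil {
		return err
	}
	fmt.Println(string(out))
	return nil
}
```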
50 changes: 40 additions & 10 deletions pkg/cluster/manager/list.go
@@ -22,9 +22,18 @@ import (
"github.com/pingcap/tiup/pkg/meta"
)

// Cluster represents a cluster
type Cluster struct {
Name string `json:"name"`
User string `json:"user"`
Version string `json:"version"`
Path string `json:"path"`
PrivateKey string `json:"private_key"`
}

// ListCluster lists the clusters.
func (m *Manager) ListCluster() error {
names, err := m.specManager.List()
clusters, err := m.GetClusterList()
if err != nil {
return err
}
@@ -33,25 +42,46 @@ func (m *Manager) ListCluster() error {
// Header
{"Name", "User", "Version", "Path", "PrivateKey"},
}
for _, v := range clusters {
clusterTable = append(clusterTable, []string{
v.Name,
v.User,
v.Version,
v.Path,
v.PrivateKey,
})
}

cliutil.PrintTable(clusterTable, true)
return nil
}

// GetClusterList gets the list of clusters.
func (m *Manager) GetClusterList() ([]Cluster, error) {
names, err := m.specManager.List()
if err != nil {
return nil, err
}

var clusters = []Cluster{}

for _, name := range names {
metadata, err := m.meta(name)
if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) &&
!errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) {
return perrs.Trace(err)
return nil, perrs.Trace(err)
}

base := metadata.GetBaseMeta()

clusterTable = append(clusterTable, []string{
name,
base.User,
base.Version,
m.specManager.Path(name),
m.specManager.Path(name, "ssh", "id_rsa"),
clusters = append(clusters, Cluster{
Name: name,
User: base.User,
Version: base.Version,
Path: m.specManager.Path(name),
PrivateKey: m.specManager.Path(name, "ssh", "id_rsa"),
})
}

cliutil.PrintTable(clusterTable, true)
return nil
return clusters, nil
}
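Similarly, a minimal sketch of how the extracted GetClusterList might be consumed outside ListCluster, iterating over the returned []Cluster rather than printing the full table. It again assumes an already-constructed *manager.Manager; the helper name printClusterNames is hypothetical and not part of this commit.

```go
package example

import (
	"fmt"

	"github.com/pingcap/tiup/pkg/cluster/manager"
)

// printClusterNames is a hypothetical caller of the extracted
// GetClusterList: it walks the returned []Cluster and prints a few
// fields, rather than the full table that ListCluster renders.
func printClusterNames(m *manager.Manager) error {
	clusters, err := m.GetClusterList()
	if err != nil {
		return err
	}
	for _, c := range clusters {
		fmt.Printf("%s\t%s\t%s\n", c.Name, c.Version, c.Path)
	}
	return nil
}
```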
