diff --git a/README.md b/README.md index d6cdd3810cc..9ffa9dd91bc 100755 --- a/README.md +++ b/README.md @@ -36,7 +36,8 @@ ## Linux 发行版, CPU架构 -- Ubuntu 16.04, 18.04, 20.04 , x86_64/ arm64 +- Debian 9+, x86_64/ arm64 +- Ubuntu 16.04, 18.04, 20.04, x86_64/ arm64 - Centos/RHEL 7.6+, x86_64/ arm64 - 其他支持 systemd 的系统环境. x86_64/ arm64 - Kylin arm64 @@ -69,6 +70,8 @@ - 确保 /etc/resolv.conf 中的 DNS 地址可用。否则,可能会导致群集中coredns异常。 - 如果使用阿里云/华为云主机部署。 默认的pod网段会和阿里云的dns网段冲突, 建议自定义修改pod网段, 在init的时候指定`--podcidr` 来修改。 - sealos 默认会关闭防火墙, 如果需要打开防火墙, 建议手动放行相关的端口。 + - 内核要求: + - cni组件选择cilium时要求内核版本不低于5.4 # 🚀 快速开始 diff --git a/cmd/init.go b/cmd/init.go index ad7b0185fa9..8b547036ee9 100644 --- a/cmd/init.go +++ b/cmd/init.go @@ -110,7 +110,7 @@ func init() { // Here you will define your flags and configuration settings. initCmd.Flags().StringVar(&install.SSHConfig.User, "user", "root", "servers user name for ssh") initCmd.Flags().StringVar(&install.SSHConfig.Password, "passwd", "", "password for ssh") - initCmd.Flags().StringVar(&install.SSHConfig.PkFile, "pk", cert.GetUserHomeDir() + "/.ssh/id_rsa", "private key for ssh") + initCmd.Flags().StringVar(&install.SSHConfig.PkFile, "pk", cert.GetUserHomeDir()+"/.ssh/id_rsa", "private key for ssh") initCmd.Flags().StringVar(&install.SSHConfig.PkPassword, "pk-passwd", "", "private key password for ssh") initCmd.Flags().StringVar(&install.KubeadmFile, "kubeadm-config", "", "kubeadm-config.yaml template file") @@ -132,7 +132,6 @@ func init() { initCmd.Flags().StringVar(&install.Network, "network", "calico", "cni plugin, calico..") initCmd.Flags().BoolVar(&install.IPIP, "ipip", true, "ipip mode enable, calico..") initCmd.Flags().StringVar(&install.MTU, "mtu", "1440", "mtu of the ipip mode , calico..") - initCmd.Flags().StringVar(&install.LvscareImage.Image, "lvscare-image", "fanux/lvscare", "lvscare image name") initCmd.Flags().StringVar(&install.LvscareImage.Tag, "lvscare-tag", "latest", "lvscare image tag name") diff --git a/cmd/ipvs.go b/cmd/ipvs.go index 7282fa7824d..b2be75cfa17 100644 --- a/cmd/ipvs.go +++ b/cmd/ipvs.go @@ -37,7 +37,7 @@ func init() { // Here you will define your flags and configuration settings. 
ipvsCmd.Flags().BoolVar(&install.Ipvs.RunOnce, "run-once", false, "is run once mode") - ipvsCmd.Flags().BoolVarP(&install.Ipvs.Clean, "clean","c", true, " clean Vip ipvs rule before join node, if Vip has no ipvs rule do nothing.") + ipvsCmd.Flags().BoolVarP(&install.Ipvs.Clean, "clean", "c", true, " clean Vip ipvs rule before join node, if Vip has no ipvs rule do nothing.") ipvsCmd.Flags().StringVar(&install.Ipvs.VirtualServer, "vs", "", "virturl server like 10.54.0.2:6443") ipvsCmd.Flags().StringSliceVar(&install.Ipvs.RealServer, "rs", []string{}, "virturl server like 192.168.0.2:6443") diff --git a/cmd/root.go b/cmd/root.go index 4312802e936..d9d5bba7227 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -67,7 +67,7 @@ func initConfig() { home := cert.GetUserHomeDir() logFile := fmt.Sprintf("%s/.sealos/sealos.log", home) if !install.FileExist(home + "/.sealos") { - err := os.MkdirAll(home + "/.sealos",os.ModePerm) + err := os.MkdirAll(home+"/.sealos", os.ModePerm) if err != nil { fmt.Println("create default sealos config dir failed, please create it by your self mkdir -p /root/.sealos && touch /root/.sealos/config.yaml") } diff --git a/go.mod b/go.mod index 0819098d1d5..d6beee4a6ca 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,6 @@ require ( github.com/dustin/go-humanize v1.0.0 // indirect github.com/fanux/lvscare v0.0.0-00010101000000-000000000000 github.com/fanux/sealgate v0.0.5 - github.com/ghodss/yaml v1.0.0 github.com/google/uuid v1.1.2 // indirect github.com/linuxsuren/cobra-extension v0.0.8 github.com/pkg/errors v0.9.1 @@ -28,6 +27,7 @@ require ( k8s.io/api v0.18.0 k8s.io/apimachinery v0.18.0 k8s.io/client-go v0.18.0 + sigs.k8s.io/yaml v1.2.0 ) replace ( diff --git a/install/check.go b/install/check.go index 6d2f40a7920..bc172361408 100644 --- a/install/check.go +++ b/install/check.go @@ -49,5 +49,15 @@ func (s *SealosInstaller) CheckValid() { } logger.Info("[%s] ------------ check ok", h) } + if s.Network == "cilium" { + if err := SSHConfig.CmdAsync(h, "uname -r | grep 5 | awk -F. '{if($2>3)print \"ok\"}' | grep ok && exit 0 || exit 1"); err != nil { + logger.Error("[%s] ------------ check kernel version < 5.3", h) + os.Exit(1) + } + if err := SSHConfig.CmdAsync(h, "mount bpffs -t bpf /sys/fs/bpf && mount | grep /sys/fs/bpf && exit 0 || exit 1"); err != nil { + logger.Error("[%s] ------------ mount bpffs err", h) + os.Exit(1) + } + } } } diff --git a/install/config.go b/install/config.go index 5d62cacbb1c..928774f238e 100644 --- a/install/config.go +++ b/install/config.go @@ -28,13 +28,13 @@ type SealConfig struct { PkPassword string //ApiServer ex. 
apiserver.cluster.local ApiServerDomain string - - VIP string - PkgURL string - Version string - Repo string - PodCIDR string - SvcCIDR string + Network string + VIP string + PkgURL string + Version string + Repo string + PodCIDR string + SvcCIDR string //certs location CertPath string CertEtcdPath string @@ -59,6 +59,7 @@ func (c *SealConfig) Dump(path string) { c.PrivateKey = SSHConfig.PkFile c.PkPassword = SSHConfig.PkPassword c.ApiServerDomain = ApiServer + c.Network = Network c.VIP = VIP c.PkgURL = PkgUrl c.Version = Version @@ -135,13 +136,13 @@ func (c *SealConfig) Load(path string) (err error) { SSHConfig.PkFile = c.PrivateKey SSHConfig.PkPassword = c.PkPassword ApiServer = c.ApiServerDomain + Network = c.Network VIP = c.VIP PkgUrl = c.PkgURL Version = c.Version Repo = c.Repo PodCIDR = c.PodCIDR SvcCIDR = c.SvcCIDR - DnsDomain = c.DnsDomain ApiServerCertSANs = c.ApiServerCertSANs CertPath = c.CertPath @@ -184,6 +185,7 @@ func (c *SealConfig) ShowDefaultConfig() { c.Passwd = "123456" c.PrivateKey = home + "/.ssh/id_rsa" c.ApiServerDomain = "apiserver.cluster.local" + c.Network = "calico" c.VIP = "10.103.97.2" c.PkgURL = home + "/kube1.17.13.tar.gz" c.Version = "v1.17.13" diff --git a/install/constants.go b/install/constants.go index 30b82d7fbcb..df1f671df23 100644 --- a/install/constants.go +++ b/install/constants.go @@ -69,6 +69,9 @@ controllerManager: extraArgs: feature-gates: TTLAfterFinished=true experimental-cluster-signing-duration: 876000h +{{- if eq .Network "cilium" }} + allocate-node-cidrs: \"true\" +{{- end }} extraVolumes: - hostPath: /etc/localtime mountPath: /etc/localtime @@ -159,6 +162,9 @@ controllerManager: extraArgs: feature-gates: TTLAfterFinished=true experimental-cluster-signing-duration: 876000h +{{- if eq .Network "cilium" }} + allocate-node-cidrs: \"true\" +{{- end }} extraVolumes: - hostPath: /etc/localtime mountPath: /etc/localtime diff --git a/install/etcd_save_test.go b/install/etcd_save_test.go index 254e52d3179..e9bd2e87293 100644 --- a/install/etcd_save_test.go +++ b/install/etcd_save_test.go @@ -15,8 +15,8 @@ func Test_reFormatHostToIp(t *testing.T) { args args want string }{ - {"test",args{"192.168.0.22:22"},"192.168.0.22"}, - {"test02",args{"192.168.0.22"},"192.168.0.22"}, + {"test", args{"192.168.0.22:22"}, "192.168.0.22"}, + {"test02", args{"192.168.0.22"}, "192.168.0.22"}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -26,7 +26,6 @@ func Test_reFormatHostToIp(t *testing.T) { }) } - u := fmt.Sprintf("%v", time.Now().Unix()) fmt.Println(u) -} \ No newline at end of file +} diff --git a/install/generator.go b/install/generator.go index 0bf4cfcde0c..364f1275fe7 100644 --- a/install/generator.go +++ b/install/generator.go @@ -3,8 +3,8 @@ package install import ( "bytes" "fmt" - "github.com/ghodss/yaml" "github.com/wonderivan/logger" + "sigs.k8s.io/yaml" "strings" "text/template" ) @@ -112,6 +112,7 @@ func TemplateFromTemplateContent(templateContent string) []byte { envMap["Repo"] = Repo envMap["Master0"] = IpFormat(MasterIPs[0]) envMap["CgroupDriver"] = CgroupDriver + envMap["Network"] = Network var buffer bytes.Buffer _ = tmpl.Execute(&buffer, envMap) return buffer.Bytes() diff --git a/install/generator_test.go b/install/generator_test.go index fe0b3e7267e..ecb084a0731 100644 --- a/install/generator_test.go +++ b/install/generator_test.go @@ -24,6 +24,24 @@ func TestTemplate(t *testing.T) { t.Log(string(Template())) } +func TestNetCiliumTemplate(t *testing.T) { + var masters = []string{"172.20.241.205:22", "172.20.241.206:22", 
"172.20.241.207:22"} + var vip = "10.103.97.1" + MasterIPs = masters + VIP = vip + ApiServer = "apiserver.cluster.local" + Version = "1.20.5" + Network = "cilium" + t.Log(string(Template())) + Network = "calico" + t.Log(string(Template())) + Version = "1.18.5" + Network = "cilium" + t.Log(string(Template())) + Network = "calico" + t.Log(string(Template())) +} + var testYaml = `apiVersion: kubeadm.k8s.io/v1beta1 kind: ClusterConfiguration kubernetesVersion: v1.18.0 @@ -123,8 +141,8 @@ func TestJoinTemplate(t *testing.T) { TokenCaCertHash = "sha256:a68c79c87368ff794ae50c5fd6a8ce13fdb2778764f1080614ddfeaa0e2b9d14" VIP = vip - config.Cmd("127.0.0.1", "echo \""+string(JoinTemplate(IpFormat(masters[0]), "systemd"))+"\" > ~/aa") - t.Log(string(JoinTemplate(IpFormat(masters[0]), "cgroupfs"))) + config.Cmd("127.0.0.1", "echo \""+string(JoinTemplate(IpFormat(masters[0]), ""))+"\" > ~/aa") + t.Log(string(JoinTemplate(IpFormat(masters[0]), ""))) Version = "v1.19.0" config.Cmd("127.0.0.1", "echo \""+string(JoinTemplate("", "systemd"))+"\" > ~/aa") @@ -145,4 +163,4 @@ controlPlane: localAPIEndpoint: advertiseAddress: {{.Master}} bindPort: 6443 -` \ No newline at end of file +` diff --git a/install/init.go b/install/init.go index d8ac4846942..8a54d631805 100644 --- a/install/init.go +++ b/install/init.go @@ -25,9 +25,11 @@ func BuildInit() { nodes := NodeIPs hosts := append(masters, nodes...) i := &SealosInstaller{ - Hosts: hosts, - Masters: masters, - Nodes: nodes, + Hosts: hosts, + Masters: masters, + Nodes: nodes, + Network: Network, + ApiServer: ApiServer, } i.CheckValid() i.Print() @@ -197,7 +199,7 @@ func (s *SealosInstaller) InstallMaster0() { } //cmd = `kubectl apply -f /root/kube/conf/net/calico.yaml || true` - // can-reach is used by calico multi network , flannel has nothing to add. just Use it. + // can-reach is used by calico multi network , flannel has nothing to add. just Use it. 
if k8s.IsIpv4(Interface) && Network == "calico" { Interface = "can-reach=" + Interface } else { @@ -205,10 +207,12 @@ func (s *SealosInstaller) InstallMaster0() { } netyaml := net.NewNetwork(Network, net.MetaData{ - Interface: Interface, - CIDR: PodCIDR, - IPIP: IPIP, - MTU: MTU, + Interface: Interface, + CIDR: PodCIDR, + IPIP: IPIP, + MTU: MTU, + CniRepo: Repo, + K8sServiceHost: s.ApiServer, }).Manifests("") cmd = fmt.Sprintf(`echo '%s' | kubectl apply -f -`, netyaml) diff --git a/install/join.go b/install/join.go index 149f0f64349..87a773956c8 100644 --- a/install/join.go +++ b/install/join.go @@ -24,9 +24,11 @@ func joinMastersFunc(joinMasters []string) { masters := MasterIPs nodes := NodeIPs i := &SealosInstaller{ - Hosts: joinMasters, - Masters: masters, - Nodes: nodes, + Hosts: joinMasters, + Masters: masters, + Nodes: nodes, + Network: Network, + ApiServer: ApiServer, } i.CheckValid() i.SendSealos() @@ -164,7 +166,6 @@ func (s *SealosInstaller) JoinNodes() { addRouteCmd := fmt.Sprintf("sealos route add --host %s --gateway %s", VIP, IpFormat(node)) SSHConfig.CmdToString(node, addRouteCmd, "") } - _ = SSHConfig.CmdAsync(node, ipvsCmd) // create ipvs rules before we join node cmd := s.Command(Version, JoinNode) diff --git a/install/sealos.go b/install/sealos.go index 6286a479a4b..35a740a0c65 100644 --- a/install/sealos.go +++ b/install/sealos.go @@ -40,9 +40,11 @@ var ( //SealosInstaller is type SealosInstaller struct { - Hosts []string - Masters []string - Nodes []string + Hosts []string + Masters []string + Nodes []string + Network string + ApiServer string } type CommandType string @@ -54,8 +56,8 @@ const JoinNode CommandType = "joinNode" func (s *SealosInstaller) Command(version string, name CommandType) (cmd string) { cmds := make(map[CommandType]string) - // Please convert your v1beta1 configuration files to v1beta2 using the - // "kubeadm config migrate" command of kubeadm v1.15.x, 因此1.14 版本不支持双网卡. + // Please convert your v1beta1 configuration files to v1beta2 using the + // "kubeadm config migrate" command of kubeadm v1.15.x, 因此1.14 版本不支持双网卡. 
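// (English gloss of the note above: kubeadm v1.15.x is needed to migrate v1beta1
// configs to v1beta2, hence the dual-NIC setup is not supported on v1.14.)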
cmds = map[CommandType]string{ InitMaster: `kubeadm init --config=/root/kubeadm-config.yaml --experimental-upload-certs` + vlogToStr(), JoinMaster: fmt.Sprintf("kubeadm join %s:6443 --token %s --discovery-token-ca-cert-hash %s --experimental-control-plane --certificate-key %s"+vlogToStr(), IpFormat(s.Masters[0]), JoinToken, TokenCaCertHash, CertificateKey), @@ -65,8 +67,21 @@ func (s *SealosInstaller) Command(version string, name CommandType) (cmd string) //todo if VersionToInt(version) >= 115 { cmds[InitMaster] = `kubeadm init --config=/root/kubeadm-config.yaml --upload-certs` + vlogToStr() - cmds[JoinMaster] = "kubeadm join --config=/root/kubeadm-join-config.yaml "+vlogToStr() - cmds[JoinNode] = "kubeadm join --config=/root/kubeadm-join-config.yaml "+vlogToStr() + cmds[JoinMaster] = "kubeadm join --config=/root/kubeadm-join-config.yaml " + vlogToStr() + cmds[JoinNode] = "kubeadm join --config=/root/kubeadm-join-config.yaml " + vlogToStr() + } + + // version >= 1.16.x support kubeadm init --skip-phases=addon/kube-proxy + // version <= 115 + // kubectl -n kube-system delete ds kube-proxy + // # Run on each node: + // iptables-restore <(iptables-save | grep -v KUBE) + if s.Network == "cilium" { + if VersionToInt(version) >= 116 { + cmds[InitMaster] = `kubeadm init --skip-phases=addon/kube-proxy --config=/root/kubeadm-config.yaml --upload-certs` + vlogToStr() + } else { + cmds[InitMaster] = `kubeadm init --config=/root/kubeadm-config.yaml --upload-certs` + vlogToStr() + } } v, ok := cmds[name] diff --git a/install/upgrade.go b/install/upgrade.go index f8024d88b5a..ea9d9a0ebc0 100644 --- a/install/upgrade.go +++ b/install/upgrade.go @@ -2,7 +2,7 @@ package install import ( "fmt" - "os" + "os" "time" "k8s.io/client-go/kubernetes" @@ -143,7 +143,7 @@ func (u *SealosUpgrade) upgradeNodes(hostnames []string, isMaster bool) { time.Sleep(time.Second * 10) k8sNode, _ := k8s.GetNodeByName(u.Client, node) if k8s.IsNodeReady(*k8sNode) { - logger.Info("[%s] fourth: %s nodes is ready", ip,node) + logger.Info("[%s] fourth: %s nodes is ready", ip, node) // fifth to uncordon node err = k8s.CordonUnCordon(u.Client, node, false) diff --git a/install/upgrade_pool.go b/install/upgrade_pool.go index cc452bc5178..02907d530b5 100644 --- a/install/upgrade_pool.go +++ b/install/upgrade_pool.go @@ -2,19 +2,18 @@ package install import "sync" - type uPool struct { queue chan int - wg *sync.WaitGroup + wg *sync.WaitGroup } -func NewPool (size int) *uPool { +func NewPool(size int) *uPool { if size <= 1 { size = 1 } return &uPool{ queue: make(chan int, size), - wg: &sync.WaitGroup{}, + wg: &sync.WaitGroup{}, } } @@ -35,4 +34,4 @@ func (p *uPool) Done() { func (p *uPool) Wait() { p.wg.Wait() -} \ No newline at end of file +} diff --git a/install/utils_test.go b/install/utils_test.go index c3013018b13..48ce71b2da1 100644 --- a/install/utils_test.go +++ b/install/utils_test.go @@ -218,10 +218,10 @@ func TestFor120(t *testing.T) { args args want bool }{ - {"test01",args{"v1.19.2"}, false}, - {"test02",args{"v1.18.2"}, false}, - {"test03",args{"v1.20.2"}, true}, - {"test04",args{"v1.20.0-rc.0"}, true}, + {"test01", args{"v1.19.2"}, false}, + {"test02", args{"v1.18.2"}, false}, + {"test03", args{"v1.20.2"}, true}, + {"test04", args{"v1.20.0-rc.0"}, true}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -245,4 +245,4 @@ func Test_Example(t *testing.T) { } pool.Wait() println(runtime.NumGoroutine()) -} \ No newline at end of file +} diff --git a/k8s/node.go b/k8s/node.go index 756648ab47a..4d74f281b44 100644 
--- a/k8s/node.go +++ b/k8s/node.go @@ -17,11 +17,11 @@ import ( ) const ( - HostnameLabel = "kubernetes.io/hostname" - NodeRoleLabel = "node-role.kubernetes.io/master" - MaxRetries = 5 - RetryInterval = 5 - WrapTransportTimeout = 30 + HostnameLabel = "kubernetes.io/hostname" + NodeRoleLabel = "node-role.kubernetes.io/master" + MaxRetries = 5 + RetryInterval = 5 + WrapTransportTimeout = 30 ) var ( diff --git a/k8s/utlis_test.go b/k8s/utlis_test.go index fac22452f94..bbb84600bcc 100644 --- a/k8s/utlis_test.go +++ b/k8s/utlis_test.go @@ -131,7 +131,7 @@ func Test_removeByUse(t *testing.T) { func Benchmark_remove(b *testing.B) { b.ResetTimer() origin := []string{"123", "245", "345", "123", "245", "345", "123", "245", "345"} - for i:=0; i + +package net + +import "strings" + +type Cilium struct { + metadata MetaData +} + +func (c Cilium) Manifests(template string) string { + if template == "" { + template = c.Template() + } + if c.metadata.CniRepo == "" || c.metadata.CniRepo == "k8s.gcr.io" { + c.metadata.CniRepo = "quay.io/cilium" + } + //if c.metadata.Interface == "" { + // c.metadata.Interface = "interface=" + defaultInterface + //} + if c.metadata.CIDR == "" { + c.metadata.CIDR = defaultCIDR + } + + if c.metadata.K8sServiceHost == "" { + c.metadata.K8sServiceHost = defaultK8sServiceHost + } + if c.metadata.K8sServicePort == "" { + c.metadata.K8sServicePort = defaultK8sServicePort + } + c.metadata.K8sServiceHost = strings.Split(c.metadata.K8sServiceHost, ":")[0] + + return render(c.metadata, template) +} + +func (c Cilium) Template() string { + return CiliumManifests +} + +// docs https://docs.cilium.io/en/stable/gettingstarted/minikube/ +// quick install kubectl create -f https://raw.githubusercontent.com/cilium/cilium/1.9.3/install/kubernetes/quick-install.yaml +// experimental kubectl create -f https://raw.githubusercontent.com/cilium/cilium/1.9.3/install/kubernetes/experimental-install.yaml +// cilium kernel-check kubectl apply -f https://raw.githubusercontent.com/cilium/cilium/1.9.3/examples/kubernetes/kernel-check/kernel-check.yaml +const CiliumManifests = ` +--- +# Source: cilium/templates/cilium-agent-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium + namespace: kube-system +--- +# Source: cilium/templates/cilium-operator-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cilium-operator + namespace: kube-system +--- +# Source: cilium/templates/cilium-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: cilium-config + namespace: kube-system +data: + + # Identity allocation mode selects how identities are shared between cilium + # nodes by setting how they are stored. The options are "crd" or "kvstore". + # - "crd" stores identities in kubernetes as CRDs (custom resource definition). + # These can be queried with: + # kubectl get ciliumid + # - "kvstore" stores identities in a kvstore, etcd or consul, that is + # configured below. Cilium versions before 1.6 supported only the kvstore + # backend. Upgrades from these older cilium versions should continue using + # the kvstore by commenting out the identity-allocation-mode below, or + # setting it to "kvstore". + identity-allocation-mode: crd + cilium-endpoint-gc-interval: "5m0s" + + # If you want to run cilium in debug mode change this value to true + debug: "false" + # The agent can be put into the following three policy enforcement modes + # default, always and never. 
+ # https://docs.cilium.io/en/latest/policy/intro/#policy-enforcement-modes + enable-policy: "default" + + # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4 + # address. + enable-ipv4: "true" + + # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6 + # address. + enable-ipv6: "false" + + # -- Clean all eBPF datapath state from the initContainer of the cilium-agent + # DaemonSet. + # + # WARNING: Use with care! + clean-cilium-bpf-state: "false" + + # -- Clean all local Cilium state from the initContainer of the cilium-agent + # DaemonSet. Implies cleanBpfState: true. + # + # WARNING: Use with care! + clean-cilium-state: "false" + + # Users who wish to specify their own custom CNI configuration file must set + # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration. + custom-cni-conf: "false" + enable-bpf-clock-probe: "true" + # If you want cilium monitor to aggregate tracing for packets, set this level + # to "low", "medium", or "maximum". The higher the level, the less packets + # that will be seen in monitor output. + monitor-aggregation: medium + + # The monitor aggregation interval governs the typical time between monitor + # notification events for each allowed connection. + # + # Only effective when monitor aggregation is set to "medium" or higher. + monitor-aggregation-interval: 5s + + # The monitor aggregation flags determine which TCP flags which, upon the + # first observation, cause monitor notifications to be generated. + # + # Only effective when monitor aggregation is set to "medium" or higher. + monitor-aggregation-flags: all + # Specifies the ratio (0.0-1.0) of total system memory to use for dynamic + # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. + bpf-map-dynamic-size-ratio: "0.0025" + # bpf-policy-map-max specifies the maximum number of entries in endpoint + # policy map (per endpoint) + bpf-policy-map-max: "16384" + # bpf-lb-map-max specifies the maximum number of entries in bpf lb service, + # backend and affinity maps. + bpf-lb-map-max: "65536" + # Pre-allocation of map entries allows per-packet latency to be reduced, at + # the expense of up-front memory allocation for the entries in the maps. The + # default value below will minimize memory usage in the default installation; + # users who are sensitive to latency may consider setting this to "true". + # + # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore + # this option and behave as though it is set to "true". + # + # If this value is modified, then during the next Cilium startup the restore + # of existing endpoints and tracking of ongoing connections may be disrupted. + # As a result, reply packets may be dropped and the load-balancing decisions + # for established connections may change. + # + # If this option is set to "false" during an upgrade from 1.3 or earlier to + # 1.4 or later, then it may cause one-time disruptions during the upgrade. + preallocate-bpf-maps: "true" + + # Regular expression matching compatible Istio sidecar istio-proxy + # container image names + sidecar-istio-proxy-image: "cilium/istio_proxy" + + # Name of the cluster. Only relevant when building a mesh of clusters. + cluster-name: default + # Unique ID of the cluster. Must be unique across all conneted clusters and + # in the range of 1 and 255. Only relevant when building a mesh of clusters. 
+ cluster-id: "" + + # Encapsulation mode for communication between nodes + # Possible values: + # - disabled + # - vxlan (default) + # - geneve + tunnel: vxlan + # Enables L7 proxy for L7 policy enforcement and visibility + enable-l7-proxy: "true" + + # wait-bpf-mount makes init container wait until bpf filesystem is mounted + wait-bpf-mount: "false" + + masquerade: "true" + enable-bpf-masquerade: "true" + + enable-xt-socket-fallback: "true" + install-iptables-rules: "true" + + auto-direct-node-routes: "true" + enable-bandwidth-manager: "false" + enable-local-redirect-policy: "false" + + kube-proxy-replacement: "strict" + kube-proxy-replacement-healthz-bind-address: "" + enable-health-check-nodeport: "true" + node-port-bind-protection: "true" + enable-auto-protect-node-port-range: "true" + enable-session-affinity: "true" + enable-endpoint-health-checking: "true" + enable-health-checking: "true" + enable-well-known-identities: "false" + enable-remote-node-identity: "true" + operator-api-serve-addr: "127.0.0.1:9234" + # Enable Hubble gRPC service. + enable-hubble: "true" + # UNIX domain socket for Hubble server to listen to. + hubble-socket-path: "/var/run/cilium/hubble.sock" + # An additional address for Hubble server to listen to (e.g. ":4244"). + hubble-listen-address: ":4244" + hubble-disable-tls: "false" + hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt + hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key + hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt + ipam: "cluster-pool" + cluster-pool-ipv4-cidr: "{{ .CIDR }}" + cluster-pool-ipv4-mask-size: "24" + disable-cnp-status-updates: "true" +--- +# Source: cilium/templates/cilium-agent-clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium +rules: +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + - services + - nodes + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods + - pods/finalizers + verbs: + - get + - list + - watch + - update + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - update +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + # Deprecated for removal in v1.10 + - create + - list + - watch + - update + + # This is used when validating policies in preflight. This will need to stay + # until we figure out how to avoid "get" inside the preflight, and then + # should be removed ideally. 
+ - get +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumnetworkpolicies/finalizers + - ciliumclusterwidenetworkpolicies + - ciliumclusterwidenetworkpolicies/status + - ciliumclusterwidenetworkpolicies/finalizers + - ciliumendpoints + - ciliumendpoints/status + - ciliumendpoints/finalizers + - ciliumnodes + - ciliumnodes/status + - ciliumnodes/finalizers + - ciliumidentities + - ciliumidentities/finalizers + - ciliumlocalredirectpolicies + - ciliumlocalredirectpolicies/status + - ciliumlocalredirectpolicies/finalizers + verbs: + - "*" +--- +# Source: cilium/templates/cilium-operator-clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cilium-operator +rules: +- apiGroups: + - "" + resources: + # to automatically delete [core|kube]dns pods so that are starting to being + # managed by Cilium + - pods + verbs: + - get + - list + - watch + - delete +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + # to perform the translation of a CNP that contains ToGroup to its endpoints + - services + - endpoints + # to check apiserver connectivity + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumnetworkpolicies/finalizers + - ciliumclusterwidenetworkpolicies + - ciliumclusterwidenetworkpolicies/status + - ciliumclusterwidenetworkpolicies/finalizers + - ciliumendpoints + - ciliumendpoints/status + - ciliumendpoints/finalizers + - ciliumnodes + - ciliumnodes/status + - ciliumnodes/finalizers + - ciliumidentities + - ciliumidentities/status + - ciliumidentities/finalizers + - ciliumlocalredirectpolicies + - ciliumlocalredirectpolicies/status + - ciliumlocalredirectpolicies/finalizers + verbs: + - "*" +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - update + - watch +# For cilium-operator running in HA mode. +# +# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election +# between mulitple running instances. +# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less +# common and fewer objects in the cluster watch "all Leases". +# The support for leases was introduced in coordination.k8s.io/v1 during Kubernetes 1.14 release. +# In Cilium we currently dont support HA mode for K8s version < 1.14. This condition make sure +# that we only authorize access to leases resources in supported K8s versions. 
+- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +--- +# Source: cilium/templates/cilium-agent-clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system +--- +# Source: cilium/templates/cilium-operator-clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cilium-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-operator +subjects: +- kind: ServiceAccount + name: cilium-operator + namespace: kube-system +--- +# Source: cilium/templates/cilium-agent-daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: cilium + name: cilium + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: cilium + updateStrategy: + rollingUpdate: + maxUnavailable: 2 + type: RollingUpdate + template: + metadata: + annotations: + # This annotation plus the CriticalAddonsOnly toleration makes + # cilium to be a critical pod in the cluster, which ensures cilium + # gets priority scheduling. + # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ + scheduler.alpha.kubernetes.io/critical-pod: "" + labels: + k8s-app: cilium + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: k8s-app + operator: In + values: + - cilium + topologyKey: kubernetes.io/hostname + containers: + - args: + - --config-dir=/tmp/cilium/config-map + command: + - cilium-agent + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 9876 + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + failureThreshold: 10 + # The initial delay for the liveness probe is intentionally large to + # avoid an endless kill & restart cycle if in the event that the initial + # bootstrapping takes longer than expected. 
+ initialDelaySeconds: 120 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 9876 + scheme: HTTP + httpHeaders: + - name: "brief" + value: "true" + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_FLANNEL_MASTER_DEVICE + valueFrom: + configMapKeyRef: + key: flannel-master-device + name: cilium-config + optional: true + - name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT + valueFrom: + configMapKeyRef: + key: flannel-uninstall-on-exit + name: cilium-config + optional: true + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + - name: CILIUM_CNI_CHAINING_MODE + valueFrom: + configMapKeyRef: + key: cni-chaining-mode + name: cilium-config + optional: true + - name: CILIUM_CUSTOM_CNI_CONF + valueFrom: + configMapKeyRef: + key: custom-cni-conf + name: cilium-config + optional: true +{{- if .K8sServiceHost }} + - name: KUBERNETES_SERVICE_HOST + value: "{{ .K8sServiceHost }}" +{{- end }} +{{- if .K8sServicePort }} + - name: KUBERNETES_SERVICE_PORT + value: "{{ .K8sServicePort }}" +{{- end }} + image: {{ .CniRepo }}/cilium:v1.9.3 + imagePullPolicy: IfNotPresent + lifecycle: + postStart: + exec: + command: + - "/cni-install.sh" + - "--enable-debug=false" + preStop: + exec: + command: + - /cni-uninstall.sh + name: cilium-agent + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + privileged: true + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/opt/cni/bin + name: cni-path + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + # Needed to be able to load kernel modules + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /var/lib/cilium/tls/hubble + name: hubble-tls + readOnly: true + hostNetwork: true + initContainers: + - command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + - name: CILIUM_WAIT_BPF_MOUNT + valueFrom: + configMapKeyRef: + key: wait-bpf-mount + name: cilium-config + optional: true +{{- if .K8sServiceHost }} + - name: KUBERNETES_SERVICE_HOST + value: "{{ .K8sServiceHost }}" +{{- end }} +{{- if .K8sServicePort }} + - name: KUBERNETES_SERVICE_PORT + value: "{{ .K8sServicePort }}" +{{- end }} + image: {{ .CniRepo }}/cilium:v1.9.3 + imagePullPolicy: IfNotPresent + name: clean-cilium-state + securityContext: + capabilities: + add: + - NET_ADMIN + privileged: true + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + mountPropagation: HostToContainer + - mountPath: /var/run/cilium + name: cilium-run + resources: + requests: + cpu: 100m + memory: 100Mi + restartPolicy: Always + priorityClassName: system-node-critical + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + tolerations: + - operator: 
Exists + volumes: + # To keep state between restarts / upgrades + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + # To keep state between restarts / upgrades for bpf maps + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + # To install cilium cni plugin in the host + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-path + # To install cilium cni configuration in the host + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + # To be able to load kernel modules + - hostPath: + path: /lib/modules + name: lib-modules + # To access iptables concurrently with other processes (e.g. kube-proxy) + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + # To read the clustermesh configuration + - name: clustermesh-secrets + secret: + defaultMode: 420 + optional: true + secretName: cilium-clustermesh + # To read the configuration from the config map + - configMap: + name: cilium-config + name: cilium-config-path + - name: hubble-tls + projected: + sources: + - secret: + name: hubble-server-certs + items: + - key: tls.crt + path: server.crt + - key: tls.key + path: server.key + optional: true + - configMap: + name: hubble-ca-cert + items: + - key: ca.crt + path: client-ca.crt + optional: true +--- +# Source: cilium/templates/cilium-operator-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + io.cilium/app: operator + name: cilium-operator + name: cilium-operator + namespace: kube-system +spec: + # We support HA mode only for Kubernetes version > 1.14 + # See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go + # for more details. + replicas: 1 + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + labels: + io.cilium/app: operator + name: cilium-operator + spec: + # In HA mode, cilium-operator pods must not be scheduled on the same + # node as they will clash with each other. 
+ affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: io.cilium/app + operator: In + values: + - operator + topologyKey: kubernetes.io/hostname + containers: + - args: + - --config-dir=/tmp/cilium/config-map + - --debug=$(CILIUM_DEBUG) + command: + - cilium-operator-generic + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true +{{- if .K8sServiceHost }} + - name: KUBERNETES_SERVICE_HOST + value: "{{ .K8sServiceHost }}" +{{- end }} +{{- if .K8sServicePort }} + - name: KUBERNETES_SERVICE_PORT + value: "{{ .K8sServicePort }}" +{{- end }} + image: {{ .CniRepo }}/operator-generic:v1.9.3 + imagePullPolicy: IfNotPresent + name: cilium-operator + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 3 + volumeMounts: + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + hostNetwork: true + restartPolicy: Always + priorityClassName: system-cluster-critical + serviceAccount: cilium-operator + serviceAccountName: cilium-operator + tolerations: + - operator: Exists + volumes: + # To read the configuration from the config map + - configMap: + name: cilium-config + name: cilium-config-path +` diff --git a/net/flannel.go b/net/flannel.go index 5053aa00588..023c249a33c 100644 --- a/net/flannel.go +++ b/net/flannel.go @@ -15,6 +15,10 @@ func (f Flannel) Manifests(template string) string { f.metadata.CIDR = defaultCIDR } + if f.metadata.CniRepo == "" || f.metadata.CniRepo == "k8s.gcr.io" { + f.metadata.CniRepo = "quay.io.azk8s.cn/coreos" + } + return render(f.metadata, template) } @@ -22,9 +26,9 @@ func (Flannel) Template() string { return FlannelManifests } -// kube-flannel.yaml uses ClusterRole & ClusterRoleBinding of rbac.authorization.k8s.io/v1. When you use Kubernetes v1.16, -// you should replace rbac.authorization.k8s.io/v1 to rbac.authorization.k8s.io/v1beta1 -// because rbac.authorization.k8s.io/v1 had become GA from Kubernetes v1.17. +// kube-flannel.yaml uses ClusterRole & ClusterRoleBinding of rbac.authorization.k8s.io/v1. When you use Kubernetes v1.16, +// you should replace rbac.authorization.k8s.io/v1 to rbac.authorization.k8s.io/v1beta1 +// because rbac.authorization.k8s.io/v1 had become GA from Kubernetes v1.17. 
// TODO v1.16- 如果使用flannel 需要使用另外的yaml const FlannelManifests = ` --- @@ -198,7 +202,7 @@ spec: serviceAccountName: flannel initContainers: - name: install-cni - image: quay.io.azk8s.cn/coreos/flannel:v0.11.0-amd64 + image: {{ .CniRepo }}/flannel:v0.11.0-amd64 command: - cp args: @@ -212,7 +216,7 @@ spec: mountPath: /etc/kube-flannel/ containers: - name: kube-flannel - image: quay.io.azk8s.cn/coreos/flannel:v0.11.0-amd64 + image: {{ .CniRepo }}/flannel:v0.11.0-amd64 command: - /opt/bin/flanneld args: @@ -293,7 +297,7 @@ spec: serviceAccountName: flannel initContainers: - name: install-cni - image: quay.io.azk8s.cn/coreos/flannel:v0.11.0-arm64 + image: {{ .CniRepo }}/flannel:v0.11.0-arm64 command: - cp args: @@ -307,7 +311,7 @@ spec: mountPath: /etc/kube-flannel/ containers: - name: kube-flannel - image: quay.io.azk8s.cn/coreos/flannel:v0.11.0-arm64 + image: {{ .CniRepo }}/flannel:v0.11.0-arm64 command: - /opt/bin/flanneld args: @@ -388,7 +392,7 @@ spec: serviceAccountName: flannel initContainers: - name: install-cni - image: quay.io.azk8s.cn/coreos/flannel:v0.11.0-arm + image: {{ .CniRepo }}/flannel:v0.11.0-arm command: - cp args: @@ -402,7 +406,7 @@ spec: mountPath: /etc/kube-flannel/ containers: - name: kube-flannel - image: quay.io.azk8s.cn/coreos/flannel:v0.11.0-arm + image: {{ .CniRepo }}/flannel:v0.11.0-arm command: - /opt/bin/flanneld args: @@ -483,7 +487,7 @@ spec: serviceAccountName: flannel initContainers: - name: install-cni - image: quay.io.azk8s.cn/coreos/flannel:v0.11.0-ppc64le + image: {{ .CniRepo }}/flannel:v0.11.0-ppc64le command: - cp args: @@ -497,7 +501,7 @@ spec: mountPath: /etc/kube-flannel/ containers: - name: kube-flannel - image: quay.io.azk8s.cn/coreos/flannel:v0.11.0-ppc64le + image: {{ .CniRepo }}/flannel:v0.11.0-ppc64le command: - /opt/bin/flanneld args: @@ -578,7 +582,7 @@ spec: serviceAccountName: flannel initContainers: - name: install-cni - image: quay.io.azk8s.cn/coreos/flannel:v0.11.0-s390x + image: {{ .CniRepo }}/flannel:v0.11.0-s390x command: - cp args: @@ -592,7 +596,7 @@ spec: mountPath: /etc/kube-flannel/ containers: - name: kube-flannel - image: quay.io.azk8s.cn/coreos/flannel:v0.11.0-s390x + image: {{ .CniRepo }}/flannel:v0.11.0-s390x command: - /opt/bin/flanneld args: @@ -636,7 +640,7 @@ spec: name: kube-flannel-cfg ` -// lastet flannel k8s manifests. For Kubernetes v1.17+ +// lastet flannel k8s manifests. 
For Kubernetes v1.17+ const newFlannelYaml = ` --- apiVersion: policy/v1beta1 @@ -806,7 +810,7 @@ spec: serviceAccountName: flannel initContainers: - name: install-cni - image: quay.io/coreos/flannel:v0.13.1-rc1 + image: {{ .CniRepo }}/flannel:v0.13.1-rc1 command: - cp args: @@ -820,7 +824,7 @@ spec: mountPath: /etc/kube-flannel/ containers: - name: kube-flannel - image: quay.io/coreos/flannel:v0.13.1-rc1 + image: {{ .CniRepo }}/flannel:v0.13.1-rc1 command: - /opt/bin/flanneld args: @@ -862,4 +866,4 @@ spec: - name: flannel-cfg configMap: name: kube-flannel-cfg -` \ No newline at end of file +` diff --git a/net/net.go b/net/net.go index 1fc49220d8f..21a2d51f61d 100644 --- a/net/net.go +++ b/net/net.go @@ -6,10 +6,13 @@ import ( ) const ( - CALICO = "calico" - FLANNEL = "flannel" - defaultInterface = "eth.*|en.*" - defaultCIDR = "100.64.0.0/10" + CALICO = "calico" + FLANNEL = "flannel" + CILIUM = "cilium" + defaultInterface = "eth.*|en.*" + defaultCIDR = "100.64.0.0/10" + defaultK8sServiceHost = "127.0.0.1" + defaultK8sServicePort = "6443" ) type MetaData struct { @@ -18,7 +21,10 @@ type MetaData struct { // ipip mode for calico.yml IPIP bool // MTU size - MTU string + MTU string + CniRepo string + K8sServiceHost string + K8sServicePort string } // Net is CNI interface @@ -35,6 +41,8 @@ func NewNetwork(t string, metadata MetaData) Net { return &Calico{metadata: metadata} case FLANNEL: return &Flannel{metadata: metadata} + case CILIUM: + return &Cilium{metadata: metadata} default: return &Calico{metadata: metadata} } diff --git a/net/net_test.go b/net/net_test.go index b2d58436d51..27458fe1437 100644 --- a/net/net_test.go +++ b/net/net_test.go @@ -14,11 +14,10 @@ func TestNewNetwork(t *testing.T) { }).Manifests("") fmt.Println(netyaml) - netyaml = NewNetwork("calico", MetaData{ - Interface: "can-reach=192.168.160.1", - CIDR: "10.1.1.1/24", - IPIP: true, - MTU: "1440", + netyaml = NewNetwork("cilium", MetaData{ + CIDR: "10.1.1.1/24", + K8sServiceHost: "127.0.0.1", + // K8sServicePort: "6443", }).Manifests("") fmt.Println(netyaml) } diff --git a/pkg/sshcmd/sshutil/connect.go b/pkg/sshcmd/sshutil/connect.go index 077ecc8450c..8aca7f92e0d 100644 --- a/pkg/sshcmd/sshutil/connect.go +++ b/pkg/sshcmd/sshutil/connect.go @@ -5,7 +5,6 @@ import ( "github.com/wonderivan/logger" "golang.org/x/crypto/ssh" "io/ioutil" - "net" "os" "strings" "time" @@ -28,9 +27,7 @@ func (ss *SSH) connect(host string) (*ssh.Client, error) { Auth: auth, Timeout: *ss.Timeout, Config: config, - HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error { - return nil - }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), } addr := ss.addrReformat(host)
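// Usage sketch for the new cilium renderer, mirroring net/net_test.go and
// install/init.go. The import path "github.com/fanux/sealos/net" and the sample
// CIDR/apiserver values are assumptions for illustration; empty MetaData fields
// fall back to the defaults in net/net.go (CIDR 100.64.0.0/10, service host
// 127.0.0.1, port 6443) and the image repo defaults to quay.io/cilium.
package main

import (
	"fmt"

	"github.com/fanux/sealos/net"
)

func main() {
	yaml := net.NewNetwork("cilium", net.MetaData{
		CIDR:           "100.64.0.0/10",
		CniRepo:        "quay.io/cilium",
		K8sServiceHost: "apiserver.cluster.local", // any ":port" suffix is stripped; the port comes from K8sServicePort
	}).Manifests("")
	// install/init.go applies the rendered manifest with:
	//   echo '<yaml>' | kubectl apply -f -
	fmt.Println(yaml)
}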