diff --git a/charts/kube-ovn/templates/controller-deploy.yaml b/charts/kube-ovn/templates/controller-deploy.yaml index c5f908a7134..13cc27976d0 100644 --- a/charts/kube-ovn/templates/controller-deploy.yaml +++ b/charts/kube-ovn/templates/controller-deploy.yaml @@ -116,6 +116,13 @@ spec: - --keep-vm-ip={{- .Values.func.ENABLE_KEEP_VM_IP }} - --enable-metrics={{- .Values.networking.ENABLE_METRICS }} - --node-local-dns-ip={{- .Values.networking.NODE_LOCAL_DNS_IP }} + - --secure-serving={{- .Values.func.SECURE_SERVING }} + securityContext: + runAsUser: 0 + privileged: false + capabilities: + add: + - NET_BIND_SERVICE env: - name: ENABLE_SSL value: "{{ .Values.networking.ENABLE_SSL }}" @@ -123,6 +130,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace - name: KUBE_NAMESPACE valueFrom: fieldRef: @@ -133,6 +144,10 @@ spec: fieldPath: spec.nodeName - name: OVN_DB_IPS value: "{{ .Values.MASTER_NODES | default (include "kubeovn.nodeIPs" .) }}" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: POD_IPS valueFrom: fieldRef: diff --git a/charts/kube-ovn/templates/monitor-deploy.yaml b/charts/kube-ovn/templates/monitor-deploy.yaml index 5099feb2b7b..88bf77c2985 100644 --- a/charts/kube-ovn/templates/monitor-deploy.yaml +++ b/charts/kube-ovn/templates/monitor-deploy.yaml @@ -44,6 +44,7 @@ spec: imagePullPolicy: {{ .Values.image.pullPolicy }} command: ["/kube-ovn/start-ovn-monitor.sh"] args: + - --secure-serving={{- .Values.func.SECURE_SERVING }} - --log_file=/var/log/kube-ovn/kube-ovn-monitor.log - --logtostderr=false - --alsologtostderr=true @@ -58,6 +59,18 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: POD_IPS valueFrom: fieldRef: diff --git a/charts/kube-ovn/templates/ovn-CR.yaml b/charts/kube-ovn/templates/ovn-CR.yaml index 6f0ae15e9a1..e656568e4a8 100644 --- a/charts/kube-ovn/templates/ovn-CR.yaml +++ b/charts/kube-ovn/templates/ovn-CR.yaml @@ -239,7 +239,18 @@ rules: - get - list - watch - + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -262,3 +273,15 @@ rules: - daemonsets verbs: - get + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/charts/kube-ovn/templates/ovn-CRB.yaml b/charts/kube-ovn/templates/ovn-CRB.yaml index 9230d90035b..e5b36922785 100644 --- a/charts/kube-ovn/templates/ovn-CRB.yaml +++ b/charts/kube-ovn/templates/ovn-CRB.yaml @@ -38,7 +38,20 @@ subjects: - kind: ServiceAccount name: kube-ovn-cni namespace: {{ .Values.namespace }} - +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kube-ovn-cni + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: kube-ovn-cni + namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -52,3 +65,17 @@ subjects: - kind: ServiceAccount name: kube-ovn-app namespace: 
{{ .Values.namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kube-ovn-app + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: kube-ovn-app + namespace: kube-system diff --git a/charts/kube-ovn/templates/ovncni-ds.yaml b/charts/kube-ovn/templates/ovncni-ds.yaml index c293407e31d..7bc6a81c52e 100644 --- a/charts/kube-ovn/templates/ovncni-ds.yaml +++ b/charts/kube-ovn/templates/ovncni-ds.yaml @@ -82,6 +82,7 @@ spec: - --kubelet-dir={{ .Values.kubelet_conf.KUBELET_DIR }} - --enable-tproxy={{ .Values.func.ENABLE_TPROXY }} - --ovs-vsctl-concurrency={{ .Values.performance.OVS_VSCTL_CONCURRENCY }} + - --secure-serving={{- .Values.func.SECURE_SERVING }} securityContext: runAsUser: 0 privileged: true @@ -96,6 +97,14 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace - name: POD_IPS valueFrom: fieldRef: diff --git a/charts/kube-ovn/values.yaml b/charts/kube-ovn/values.yaml index c3cf58d4f4b..cd9efac22e8 100644 --- a/charts/kube-ovn/values.yaml +++ b/charts/kube-ovn/values.yaml @@ -68,6 +68,7 @@ func: CHECK_GATEWAY: true LOGICAL_GATEWAY: false ENABLE_BIND_LOCAL_IP: true + SECURE_SERVING: false U2O_INTERCONNECTION: false ENABLE_TPROXY: false ENABLE_IC: false diff --git a/cmd/controller/controller.go b/cmd/controller/controller.go index ed5cfec6067..2ed0dc96405 100644 --- a/cmd/controller/controller.go +++ b/cmd/controller/controller.go @@ -6,7 +6,6 @@ import ( "net/http" "net/http/pprof" "os" - "strings" "time" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -23,11 +22,15 @@ import ( kubeovnv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" "github.com/kubeovn/kube-ovn/pkg/controller" + "github.com/kubeovn/kube-ovn/pkg/server" "github.com/kubeovn/kube-ovn/pkg/util" "github.com/kubeovn/kube-ovn/versions" ) -const ovnLeaderResource = "kube-ovn-controller" +const ( + svcName = "kube-ovn-controller" + ovnLeaderResource = "kube-ovn-controller" +) func CmdMain() { defer klog.Flush() @@ -68,27 +71,21 @@ func CmdMain() { mux.HandleFunc("/debug/pprof/trace", pprof.Trace) } - addr := "0.0.0.0" - if os.Getenv("ENABLE_BIND_LOCAL_IP") == "true" { - podIpsEnv := os.Getenv("POD_IPS") - podIps := strings.Split(podIpsEnv, ",") - // when pod in dual mode, golang can't support bind v4 and v6 address in the same time, - // so not support bind local ip when in dual mode - if len(podIps) == 1 { - addr = podIps[0] - if util.CheckProtocol(podIps[0]) == kubeovnv1.ProtocolIPv6 { - addr = fmt.Sprintf("[%s]", podIps[0]) - } + addr := util.JoinHostPort(util.GetDefaultListenAddr(), config.PprofPort) + if !config.SecureServing { + server := &http.Server{ + Addr: addr, + ReadHeaderTimeout: 3 * time.Second, + Handler: mux, } + util.LogFatalAndExit(server.ListenAndServe(), "failed to listen and server on %s", server.Addr) + } else { + ch, err := server.SecureServing(addr, svcName, mux) + if err != nil { + util.LogFatalAndExit(err, "failed to serve on %s", addr) + } + <-ch } - // conform to Gosec G114 - // https://github.com/securego/gosec#available-rules - server := &http.Server{ - Addr: fmt.Sprintf("%s:%d", addr, config.PprofPort), - ReadHeaderTimeout: 3 * time.Second, - Handler: mux, - } - util.LogFatalAndExit(server.ListenAndServe(), "failed to listen and server on %s", server.Addr) }() // 
ctx, cancel := context.WithCancel(context.Background()) diff --git a/cmd/controller_health_check/controller_health_check.go b/cmd/controller_health_check/controller_health_check.go index 0c38a82d489..d77fbe83934 100644 --- a/cmd/controller_health_check/controller_health_check.go +++ b/cmd/controller_health_check/controller_health_check.go @@ -1,29 +1,17 @@ package controller_health_check import ( - "fmt" "net" "os" - "strings" "time" - kubeovnv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" "github.com/kubeovn/kube-ovn/pkg/util" ) func CmdMain() { addr := "127.0.0.1:10660" if os.Getenv("ENABLE_BIND_LOCAL_IP") == "true" { - podIpsEnv := os.Getenv("POD_IPS") - podIps := strings.Split(podIpsEnv, ",") - // when pod in dual mode, golang can't support bind v4 and v6 address in the same time, - // so not support bind local ip when in dual mode - if len(podIps) == 1 { - addr = fmt.Sprintf("%s:10660", podIps[0]) - if util.CheckProtocol(podIps[0]) == kubeovnv1.ProtocolIPv6 { - addr = fmt.Sprintf("[%s]:10660", podIps[0]) - } - } + addr = util.JoinHostPort(os.Getenv("POD_IP"), 10660) } conn, err := net.DialTimeout("tcp", addr, 3*time.Second) diff --git a/cmd/daemon/cniserver.go b/cmd/daemon/cniserver.go index 448ea42da7b..8d06b9818a3 100644 --- a/cmd/daemon/cniserver.go +++ b/cmd/daemon/cniserver.go @@ -18,10 +18,13 @@ import ( kubeovninformer "github.com/kubeovn/kube-ovn/pkg/client/informers/externalversions" "github.com/kubeovn/kube-ovn/pkg/daemon" "github.com/kubeovn/kube-ovn/pkg/ovs" + "github.com/kubeovn/kube-ovn/pkg/server" "github.com/kubeovn/kube-ovn/pkg/util" "github.com/kubeovn/kube-ovn/versions" ) +const svcName = "kube-ovn-cni" + func CmdMain() { defer klog.Flush() @@ -97,31 +100,37 @@ func CmdMain() { } addr := util.GetDefaultListenAddr() - if config.EnableVerboseConnCheck { go func() { - connListenaddr := fmt.Sprintf("%s:%d", addr, config.TCPConnCheckPort) + connListenaddr := util.JoinHostPort(addr, config.TCPConnCheckPort) if err := util.TCPConnectivityListen(connListenaddr); err != nil { util.LogFatalAndExit(err, "failed to start TCP listen on addr %s", addr) } }() go func() { - connListenaddr := fmt.Sprintf("%s:%d", addr, config.UDPConnCheckPort) + connListenaddr := util.JoinHostPort(addr, config.UDPConnCheckPort) if err := util.UDPConnectivityListen(connListenaddr); err != nil { util.LogFatalAndExit(err, "failed to start UDP listen on addr %s", addr) } }() } - // conform to Gosec G114 - // https://github.com/securego/gosec#available-rules - server := &http.Server{ - Addr: fmt.Sprintf("%s:%d", addr, config.PprofPort), - ReadHeaderTimeout: 3 * time.Second, - Handler: mux, + listenAddr := util.JoinHostPort(addr, config.PprofPort) + if !config.SecureServing { + server := &http.Server{ + Addr: listenAddr, + ReadHeaderTimeout: 3 * time.Second, + Handler: mux, + } + util.LogFatalAndExit(server.ListenAndServe(), "failed to listen and server on %s", server.Addr) + } else { + ch, err := server.SecureServing(listenAddr, svcName, mux) + if err != nil { + util.LogFatalAndExit(err, "failed to serve on %s", listenAddr) + } + <-ch } - util.LogFatalAndExit(server.ListenAndServe(), "failed to listen and serve on %s", server.Addr) } func mvCNIConf(configDir, configFile, confName string) error { diff --git a/cmd/ovn_monitor/ovn_monitor.go b/cmd/ovn_monitor/ovn_monitor.go index 86fb5ae8629..d9979a5d998 100644 --- a/cmd/ovn_monitor/ovn_monitor.go +++ b/cmd/ovn_monitor/ovn_monitor.go @@ -1,7 +1,6 @@ package ovn_monitor import ( - "fmt" "net/http" "os" "strings" @@ -10,12 +9,16 @@ import ( 
"github.com/prometheus/client_golang/prometheus/promhttp" "k8s.io/klog/v2" - kubeovnv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" ovn "github.com/kubeovn/kube-ovn/pkg/ovnmonitor" + "github.com/kubeovn/kube-ovn/pkg/server" "github.com/kubeovn/kube-ovn/pkg/util" "github.com/kubeovn/kube-ovn/versions" ) +const svcName = "kube-ovn-monitor" + +const port = 10661 + func CmdMain() { defer klog.Flush() @@ -25,6 +28,13 @@ func CmdMain() { util.LogFatalAndExit(err, "failed to parse config") } + addr := config.ListenAddress + if os.Getenv("ENABLE_BIND_LOCAL_IP") == "true" { + if ips := strings.Split(os.Getenv("POD_IPS"), ","); len(ips) == 1 { + addr = util.JoinHostPort(ips[0], port) + } + } + exporter := ovn.NewExporter(config) if err = exporter.StartConnection(); err != nil { klog.Errorf("%s failed to connect db socket properly: %s", ovn.GetExporterName(), err) @@ -34,30 +44,21 @@ func CmdMain() { mux := http.NewServeMux() if config.EnableMetrics { mux.Handle(config.MetricsPath, promhttp.Handler()) - klog.Infoln("Listening on", config.ListenAddress) + klog.Infoln("Listening on", addr) } - // conform to Gosec G114 - // https://github.com/securego/gosec#available-rules - - addr := config.ListenAddress - if os.Getenv("ENABLE_BIND_LOCAL_IP") == "true" { - podIpsEnv := os.Getenv("POD_IPS") - podIps := strings.Split(podIpsEnv, ",") - // when pod in dual mode, golang can't support bind v4 and v6 address in the same time, - // so not support bind local ip when in dual mode - if len(podIps) == 1 { - addr = fmt.Sprintf("%s:10661", podIps[0]) - if util.CheckProtocol(podIps[0]) == kubeovnv1.ProtocolIPv6 { - addr = fmt.Sprintf("[%s]:10661", podIps[0]) - } + if !config.SecureServing { + server := &http.Server{ + Addr: addr, + ReadHeaderTimeout: 3 * time.Second, + Handler: mux, } + util.LogFatalAndExit(server.ListenAndServe(), "failed to listen and server on %s", addr) + } else { + ch, err := server.SecureServing(addr, svcName, mux) + if err != nil { + util.LogFatalAndExit(err, "failed to serve on %s", addr) + } + <-ch } - - server := &http.Server{ - Addr: addr, - ReadHeaderTimeout: 3 * time.Second, - Handler: mux, - } - util.LogFatalAndExit(server.ListenAndServe(), "failed to listen and server on %s", config.ListenAddress) } diff --git a/dist/images/install.sh b/dist/images/install.sh index 05a28d94e16..b770d929b98 100644 --- a/dist/images/install.sh +++ b/dist/images/install.sh @@ -38,6 +38,7 @@ ENABLE_BIND_LOCAL_IP=${ENABLE_BIND_LOCAL_IP:-true} ENABLE_TPROXY=${ENABLE_TPROXY:-false} OVS_VSCTL_CONCURRENCY=${OVS_VSCTL_CONCURRENCY:-100} ENABLE_COMPACT=${ENABLE_COMPACT:-false} +SECURE_SERVING=${SECURE_SERVING:-false} # debug DEBUG_WRAPPER=${DEBUG_WRAPPER:-} @@ -3160,6 +3161,18 @@ rules: - get - list - watch + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -3173,6 +3186,20 @@ subjects: - kind: ServiceAccount name: kube-ovn-cni namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kube-ovn-cni + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: kube-ovn-cni + namespace: kube-system EOF cat < kube-ovn-app-sa.yaml @@ -3204,6 +3231,18 @@ rules: - daemonsets verbs: - get + - apiGroups: + - authentication.k8s.io + resources: + - 
tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -3217,6 +3256,20 @@ subjects: - kind: ServiceAccount name: kube-ovn-app namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kube-ovn-app + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: kube-ovn-app + namespace: kube-system EOF kubectl apply -f kube-ovn-crd.yaml @@ -4058,6 +4111,13 @@ spec: - --enable-lb-svc=$ENABLE_LB_SVC - --keep-vm-ip=$ENABLE_KEEP_VM_IP - --node-local-dns-ip=$NODE_LOCAL_DNS_IP + - --secure-serving=${SECURE_SERVING} + securityContext: + runAsUser: 0 + privileged: false + capabilities: + add: + - NET_BIND_SERVICE env: - name: ENABLE_SSL value: "$ENABLE_SSL" @@ -4065,6 +4125,10 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace - name: KUBE_NAMESPACE valueFrom: fieldRef: @@ -4075,6 +4139,10 @@ spec: fieldPath: spec.nodeName - name: OVN_DB_IPS value: $addresses + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: POD_IPS valueFrom: fieldRef: @@ -4199,6 +4267,7 @@ spec: - --kubelet-dir=$KUBELET_DIR - --enable-tproxy=$ENABLE_TPROXY - --ovs-vsctl-concurrency=$OVS_VSCTL_CONCURRENCY + - --secure-serving=${SECURE_SERVING} securityContext: runAsUser: 0 privileged: true @@ -4221,6 +4290,14 @@ spec: valueFrom: fieldRef: fieldPath: status.podIPs + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace - name: ENABLE_BIND_LOCAL_IP value: "$ENABLE_BIND_LOCAL_IP" - name: DBUS_SYSTEM_BUS_ADDRESS @@ -4486,6 +4563,7 @@ spec: imagePullPolicy: $IMAGE_PULL_POLICY command: ["/kube-ovn/start-ovn-monitor.sh"] args: + - --secure-serving=${SECURE_SERVING} - --log_file=/var/log/kube-ovn/kube-ovn-monitor.log - --logtostderr=false - --alsologtostderr=true @@ -4500,6 +4578,18 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP - name: POD_IPS valueFrom: fieldRef: diff --git a/go.mod b/go.mod index db30f972911..fe541c1d334 100644 --- a/go.mod +++ b/go.mod @@ -44,6 +44,7 @@ require ( gopkg.in/k8snetworkplumbingwg/multus-cni.v4 v4.0.2 k8s.io/api v0.30.3 k8s.io/apimachinery v0.30.3 + k8s.io/apiserver v0.30.3 k8s.io/client-go v12.0.0+incompatible k8s.io/klog/v2 v2.130.1 k8s.io/kubectl v0.30.3 @@ -238,7 +239,6 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.30.3 // indirect - k8s.io/apiserver v0.30.3 // indirect k8s.io/cli-runtime v0.30.3 // indirect k8s.io/cloud-provider v0.30.3 // indirect k8s.io/cluster-bootstrap v0.30.3 // indirect diff --git a/pkg/controller/config.go b/pkg/controller/config.go index 7dec031a5b8..8330cb8e9d0 100644 --- a/pkg/controller/config.go +++ b/pkg/controller/config.go @@ -21,7 +21,6 @@ import ( // Configuration is the controller conf type Configuration struct { - BindAddress string OvnNbAddr string OvnSbAddr string OvnTimeout int @@ -68,8 +67,9 @@ type Configuration struct { PodNicType string WorkerNum int 
- PprofPort int + PprofPort int32 EnablePprof bool + SecureServing bool NodePgProbeTime int NetworkType string @@ -143,7 +143,8 @@ func ParseFlags() (*Configuration, error) { argWorkerNum = pflag.Int("worker-num", 3, "The parallelism of each worker") argEnablePprof = pflag.Bool("enable-pprof", false, "Enable pprof") - argPprofPort = pflag.Int("pprof-port", 10660, "The port to get profiling data") + argPprofPort = pflag.Int32("pprof-port", 10660, "The port to get profiling data") + argSecureServing = pflag.Bool("secure-serving", false, "Enable secure serving") argNodePgProbeTime = pflag.Int("nodepg-probe-time", 1, "The probe interval for node port-group, the unit is minute") argNetworkType = pflag.String("network-type", util.NetworkTypeGeneve, "The ovn network type") @@ -226,6 +227,7 @@ func ParseFlags() (*Configuration, error) { WorkerNum: *argWorkerNum, EnablePprof: *argEnablePprof, PprofPort: *argPprofPort, + SecureServing: *argSecureServing, NetworkType: *argNetworkType, DefaultVlanID: *argDefaultVlanID, LsDnatModDlDst: *argLsDnatModDlDst, diff --git a/pkg/daemon/config.go b/pkg/daemon/config.go index 96daa47cbfd..738743ca8c1 100644 --- a/pkg/daemon/config.go +++ b/pkg/daemon/config.go @@ -49,7 +49,8 @@ type Configuration struct { EncapChecksum bool EnablePprof bool MacLearningFallback bool - PprofPort int + PprofPort int32 + SecureServing bool NetworkType string CniConfDir string CniConfFile string @@ -62,8 +63,8 @@ type Configuration struct { EnableArpDetectIPConflict bool KubeletDir string EnableVerboseConnCheck bool - TCPConnCheckPort int - UDPConnCheckPort int + TCPConnCheckPort int32 + UDPConnCheckPort int32 EnableTProxy bool OVSVsctlConcurrency int32 } @@ -86,7 +87,8 @@ func ParseFlags() *Configuration { argNodeSwitch = pflag.String("node-switch", "join", "The name of node gateway switch which help node to access pod network") argEncapChecksum = pflag.Bool("encap-checksum", true, "Enable checksum") argEnablePprof = pflag.Bool("enable-pprof", false, "Enable pprof") - argPprofPort = pflag.Int("pprof-port", 10665, "The port to get profiling data") + argPprofPort = pflag.Int32("pprof-port", 10665, "The port to get profiling data") + argSecureServing = pflag.Bool("secure-serving", false, "Enable secure serving") argMacLearningFallback = pflag.Bool("mac-learning-fallback", false, "Fallback to the legacy MAC learning mode") argsNetworkType = pflag.String("network-type", util.NetworkTypeGeneve, "Tunnel encapsulation protocol in overlay networks") @@ -101,8 +103,8 @@ func ParseFlags() *Configuration { argEnableArpDetectIPConflict = pflag.Bool("enable-arp-detect-ip-conflict", true, "Whether to support arp detect ip conflict in vlan network") argKubeletDir = pflag.String("kubelet-dir", "/var/lib/kubelet", "Path of the kubelet dir, default: /var/lib/kubelet") argEnableVerboseConnCheck = pflag.Bool("enable-verbose-conn-check", false, "enable TCP/UDP connectivity check listen port") - argTCPConnectivityCheckPort = pflag.Int("tcp-conn-check-port", 8100, "TCP connectivity Check Port") - argUDPConnectivityCheckPort = pflag.Int("udp-conn-check-port", 8101, "UDP connectivity Check Port") + argTCPConnectivityCheckPort = pflag.Int32("tcp-conn-check-port", 8100, "TCP connectivity Check Port") + argUDPConnectivityCheckPort = pflag.Int32("udp-conn-check-port", 8101, "UDP connectivity Check Port") argEnableTProxy = pflag.Bool("enable-tproxy", false, "enable tproxy for vpc pod liveness or readiness probe") argOVSVsctlConcurrency = pflag.Int32("ovs-vsctl-concurrency", 100, "concurrency limit of ovs-vsctl") 
) @@ -138,6 +140,7 @@ func ParseFlags() *Configuration { OvsSocket: *argOvsSocket, KubeConfigFile: *argKubeConfigFile, EnablePprof: *argEnablePprof, + SecureServing: *argSecureServing, PprofPort: *argPprofPort, MacLearningFallback: *argMacLearningFallback, NodeName: strings.ToLower(*argNodeName), diff --git a/pkg/daemon/tproxy_linux.go b/pkg/daemon/tproxy_linux.go index d509fbc450b..d2c63014a7e 100644 --- a/pkg/daemon/tproxy_linux.go +++ b/pkg/daemon/tproxy_linux.go @@ -6,7 +6,6 @@ import ( "io" "net" "strconv" - "strings" "sync" "syscall" @@ -23,23 +22,18 @@ import ( ) var ( - tcpListener net.Listener - customVPCPodIPToNs sync.Map customVPCPodTCPProbeIPPort sync.Map ) func (c *Controller) StartTProxyForwarding() { - var err error - addr := util.GetDefaultListenAddr() - protocol := "tcp" - if strings.HasPrefix(addr, "[") && strings.HasSuffix(addr, "]") { - addr = addr[1 : len(addr)-1] + addr := util.GetDefaultListenAddr() + if util.CheckProtocol(addr) == kubeovnv1.ProtocolIPv6 { protocol = "tcp6" } - tcpListener, err = goTProxy.ListenTCP(protocol, &net.TCPAddr{IP: net.ParseIP(addr), Port: util.TProxyListenPort}) + tcpListener, err := goTProxy.ListenTCP(protocol, &net.TCPAddr{IP: net.ParseIP(addr), Port: util.TProxyListenPort}) if err != nil { klog.Fatalf("Encountered error while binding listener: %s", err) return diff --git a/pkg/ovnmonitor/config.go b/pkg/ovnmonitor/config.go index 1dd2d39d6be..e48070a01bf 100644 --- a/pkg/ovnmonitor/config.go +++ b/pkg/ovnmonitor/config.go @@ -45,6 +45,7 @@ type Configuration struct { ServiceNorthdFileLogPath string ServiceNorthdFilePidPath string EnableMetrics bool + SecureServing bool } // ParseFlags get parameters information. @@ -55,6 +56,7 @@ func ParseFlags() (*Configuration, error) { argPollTimeout = pflag.Int("ovs.timeout", 2, "Timeout on JSON-RPC requests to OVN.") argPollInterval = pflag.Int("ovs.poll-interval", 30, "The minimum interval (in seconds) between collections from OVN server.") argEnableMetrics = pflag.Bool("enable-metrics", true, "Whether to support metrics query") + argSecureServing = pflag.Bool("secure-serving", false, "Whether to serve metrics securely") argSystemRunDir = pflag.String("system.run.dir", "/var/run/openvswitch", "OVS default run directory.") argDatabaseVswitchName = pflag.String("database.vswitch.name", "Open_vSwitch", "The name of OVS db.") @@ -144,6 +146,7 @@ func ParseFlags() (*Configuration, error) { ServiceNorthdFileLogPath: *argServiceNorthdFileLogPath, ServiceNorthdFilePidPath: *argServiceNorthdFilePidPath, EnableMetrics: *argEnableMetrics, + SecureServing: *argSecureServing, } klog.Infof("ovn monitor config is %+v", config) diff --git a/pkg/pinger/ping.go b/pkg/pinger/ping.go index eee78ba3805..87e6b810b7d 100644 --- a/pkg/pinger/ping.go +++ b/pkg/pinger/ping.go @@ -107,13 +107,13 @@ func pingNodes(config *Configuration, setMetrics bool) error { if addr.Type == v1.NodeInternalIP && slices.Contains(config.PodProtocols, util.CheckProtocol(addr.Address)) { func(nodeIP, nodeName string) { if config.EnableVerboseConnCheck { - if err := util.TCPConnectivityCheck(fmt.Sprintf("%s:%d", nodeIP, config.TCPConnCheckPort)); err != nil { + if err := util.TCPConnectivityCheck(util.JoinHostPort(nodeIP, config.TCPConnCheckPort)); err != nil { klog.Infof("TCP connectivity to node %s %s failed", nodeName, nodeIP) pingErr = err } else { klog.Infof("TCP connectivity to node %s %s success", nodeName, nodeIP) } - if err := util.UDPConnectivityCheck(fmt.Sprintf("%s:%d", nodeIP, config.UDPConnCheckPort)); err != nil { + if err := 
util.UDPConnectivityCheck(util.JoinHostPort(nodeIP, config.UDPConnCheckPort)); err != nil {
 							klog.Infof("UDP connectivity to node %s %s failed", nodeName, nodeIP)
 							pingErr = err
 						} else {
@@ -180,14 +180,14 @@ func pingPods(config *Configuration, setMetrics bool) error {
 			if slices.Contains(config.PodProtocols, util.CheckProtocol(podIP.IP)) {
 				func(podIP, podName, nodeIP, nodeName string) {
 					if config.EnableVerboseConnCheck {
-						if err := util.TCPConnectivityCheck(fmt.Sprintf("%s:%d", podIP, config.TCPConnCheckPort)); err != nil {
+						if err := util.TCPConnectivityCheck(util.JoinHostPort(podIP, config.TCPConnCheckPort)); err != nil {
 							klog.Infof("TCP connectivity to pod %s %s failed", podName, podIP)
 							pingErr = err
 						} else {
 							klog.Infof("TCP connectivity to pod %s %s success", podName, podIP)
 						}
-						if err := util.UDPConnectivityCheck(fmt.Sprintf("%s:%d", podIP, config.UDPConnCheckPort)); err != nil {
+						if err := util.UDPConnectivityCheck(util.JoinHostPort(podIP, config.UDPConnCheckPort)); err != nil {
 							klog.Infof("UDP connectivity to pod %s %s failed", podName, podIP)
 							pingErr = err
 						} else {
diff --git a/pkg/server/server.go b/pkg/server/server.go
new file mode 100644
index 00000000000..90f511df42c
--- /dev/null
+++ b/pkg/server/server.go
@@ -0,0 +1,101 @@
+package server
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+	"os"
+	"strconv"
+	"strings"
+
+	"k8s.io/apiserver/pkg/endpoints/filters"
+	"k8s.io/apiserver/pkg/endpoints/request"
+	"k8s.io/apiserver/pkg/server"
+	"k8s.io/apiserver/pkg/server/options"
+	"k8s.io/client-go/rest"
+	"k8s.io/klog/v2"
+
+	"github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/scheme"
+)
+
+func SecureServing(addr, svcName string, handler http.Handler) (<-chan struct{}, error) {
+	host, port, err := net.SplitHostPort(addr)
+	if err != nil {
+		klog.Error(err)
+		return nil, fmt.Errorf("invalid listen address %q: %v", addr, err)
+	}
+
+	namespace := os.Getenv("POD_NAMESPACE")
+	podName := os.Getenv("POD_NAME")
+	podIPs := os.Getenv("POD_IPS")
+	alternateDNS := []string{podName, svcName, fmt.Sprintf("%s.%s", svcName, namespace), fmt.Sprintf("%s.%s.svc", svcName, namespace)}
+	alternateIPs := []net.IP{net.ParseIP("127.0.0.1"), net.IPv6loopback}
+	for _, podIP := range strings.Split(podIPs, ",") {
+		if ip := net.ParseIP(podIP); ip != nil {
+			alternateIPs = append(alternateIPs, ip)
+		}
+	}
+
+	var clientConfig *rest.Config
+	opt := options.NewSecureServingOptions().WithLoopback()
+	authnOpt := options.NewDelegatingAuthenticationOptions()
+	authzOpt := options.NewDelegatingAuthorizationOptions()
+	opt.ServerCert.PairName = svcName
+	opt.ServerCert.CertDirectory = ""
+	authnOpt.RemoteKubeConfigFileOptional = true
+	authzOpt.RemoteKubeConfigFileOptional = true
+
+	if host != "" {
+		ip := net.ParseIP(host)
+		if ip == nil {
+			err = fmt.Errorf("invalid listen address: %q", addr)
+			klog.Error(err)
+			return nil, err
+		}
+		opt.BindAddress = ip
+		p, err := strconv.Atoi(port)
+		if err != nil {
+			klog.Error(err)
+			return nil, fmt.Errorf("invalid listen address %q: %v", addr, err)
+		}
+		opt.BindPort = p
+	}
+
+	if err = opt.MaybeDefaultWithSelfSignedCerts("localhost", alternateDNS, alternateIPs); err != nil {
+		klog.Error(err)
+		return nil, fmt.Errorf("failed to generate self signed certificates: %v", err)
+	}
+
+	var serving *server.SecureServingInfo
+	var authn server.AuthenticationInfo
+	var authz server.AuthorizationInfo
+	if err = opt.ApplyTo(&serving, &clientConfig); err != nil {
+		klog.Error(err)
+		return nil, fmt.Errorf("failed to apply secure serving options to secure serving info: %v", err)
+	}
+	if err = authnOpt.ApplyTo(&authn, serving, nil); err != nil {
+		klog.Error(err)
+		return nil, fmt.Errorf("failed to apply authn options to authn info: %v", err)
+	}
+	if err = authzOpt.ApplyTo(&authz); err != nil {
+		klog.Error(err)
+		return nil, fmt.Errorf("failed to apply authz options to authz info: %v", err)
+	}
+
+	handler = filters.WithAuthorization(handler, authz.Authorizer, scheme.Codecs)
+	handler = filters.WithAuthentication(handler, authn.Authenticator, filters.Unauthorized(scheme.Codecs), nil, nil)
+
+	requestInfoResolver := &request.RequestInfoFactory{}
+	handler = filters.WithRequestInfo(handler, requestInfoResolver)
+	handler = filters.WithCacheControl(handler)
+	server.AuthorizeClientBearerToken(clientConfig, &authn, &authz)
+
+	stopCh := make(chan struct{}, 1)
+	_, listenerStoppedCh, err := serving.Serve(handler, 0, stopCh)
+	if err != nil {
+		klog.Error(err)
+		return nil, fmt.Errorf("failed to serve on %s: %v", addr, err)
+	}
+
+	return listenerStoppedCh, nil
+}
diff --git a/pkg/util/net.go b/pkg/util/net.go
index 14685cb5973..332d3c094e0 100644
--- a/pkg/util/net.go
+++ b/pkg/util/net.go
@@ -649,18 +649,10 @@ func UDPConnectivityListen(address string) error {
 }
 
 func GetDefaultListenAddr() string {
-	addr := "0.0.0.0"
 	if os.Getenv("ENABLE_BIND_LOCAL_IP") == "true" {
-		podIpsEnv := os.Getenv("POD_IPS")
-		podIps := strings.Split(podIpsEnv, ",")
-		// when pod in dual mode, golang can't support bind v4 and v6 address in the same time,
-		// so not support bind local ip when in dual mode
-		if len(podIps) == 1 {
-			addr = podIps[0]
-			if CheckProtocol(podIps[0]) == kubeovnv1.ProtocolIPv6 {
-				addr = fmt.Sprintf("[%s]", podIps[0])
-			}
+		if ips := strings.Split(os.Getenv("POD_IPS"), ","); len(ips) == 1 {
+			return ips[0]
 		}
 	}
-	return addr
+	return "0.0.0.0"
 }
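
Note: the fmt.Sprintf("%s:%d", ...) call sites and the manual "[%s]" bracketing for IPv6 literals are replaced by util.JoinHostPort throughout this change. Its implementation is not shown in the diff; a minimal equivalent, assuming the int32 port type used by the updated config fields, would look like the sketch below (the real helper lives in pkg/util and may differ in detail).

package util

import (
	"net"
	"strconv"
)

// JoinHostPort joins host and port into "host:port", adding the square
// brackets an IPv6 literal needs ("fd00::1" -> "[fd00::1]:10660"); IPv4
// addresses and hostnames pass through unchanged.
func JoinHostPort(host string, port int32) string {
	return net.JoinHostPort(host, strconv.Itoa(int(port)))
}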
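
Note: with --secure-serving=true, pkg/server.SecureServing serves the metrics/pprof mux over TLS (self-signed certificates by default via MaybeDefaultWithSelfSignedCerts) and wraps it with delegated authentication (TokenReview) and authorization (SubjectAccessReview) from k8s.io/apiserver. That is why the ClusterRoles gain create on tokenreviews/subjectaccessreviews and why the new RoleBindings to extension-apiserver-authentication-reader are added: the delegating authenticator reads the extension-apiserver-authentication ConfigMap in kube-system. A scraper must therefore present a ServiceAccount bearer token over HTTPS. The in-cluster client below is an illustrative sketch only — the port (10661 is the kube-ovn-monitor port in this diff), the /metrics path, and skipping verification of the self-signed certificate are assumptions for a quick test, not part of this change.

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Read the caller's ServiceAccount token; delegated authentication
	// validates it with a TokenReview against the API server.
	token, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
	if err != nil {
		panic(err)
	}

	// The serving certificate is self-signed unless real certs are provided,
	// so verification is skipped in this illustrative client.
	client := &http.Client{
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}

	req, err := http.NewRequest(http.MethodGet, "https://127.0.0.1:10661/metrics", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+string(token))

	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Print(string(body))
}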
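
Note: because authorization is delegated, every request to the secured endpoints is checked with a SubjectAccessReview, so the scraping identity (for example a Prometheus ServiceAccount) needs permission on the non-resource URLs it hits. Such a role is not part of this diff; the snippet below is a hedged illustration using the typed RBAC API, and the role name and URL list are assumptions a cluster operator would adjust.

package example

import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// metricsReaderRole returns a ClusterRole allowing GET on the non-resource
// URLs served by the secured endpoints (metrics and, when pprof is enabled,
// the /debug/pprof pages); bind it to the scraping ServiceAccount.
func metricsReaderRole() *rbacv1.ClusterRole {
	return &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "kube-ovn-metrics-reader"},
		Rules: []rbacv1.PolicyRule{{
			NonResourceURLs: []string{"/metrics", "/debug/pprof/*"},
			Verbs:           []string{"get"},
		}},
	}
}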