From 49ff10c15d6c980c08ccb71d98d133f7616c1161 Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Fri, 21 Jul 2023 14:36:37 +0900 Subject: [PATCH 1/2] CICD for loxilb in-cluster mode --- cicd/k3s-flannel-incluster/Vagrantfile | 47 ++++++++ cicd/k3s-flannel-incluster/config.sh | 4 + cicd/k3s-flannel-incluster/k3s.yaml | 19 +++ cicd/k3s-flannel-incluster/kube-loxilb.yml | 130 +++++++++++++++++++++ cicd/k3s-flannel-incluster/loxilb.yaml | 57 +++++++++ cicd/k3s-flannel-incluster/master.sh | 13 +++ cicd/k3s-flannel-incluster/nginx.yml | 28 +++++ cicd/k3s-flannel-incluster/rmconfig.sh | 5 + cicd/k3s-flannel-incluster/sctp.yml | 41 +++++++ cicd/k3s-flannel-incluster/udp.yml | 30 +++++ cicd/k3s-flannel-incluster/validation.sh | 58 +++++++++ cicd/k3s-flannel-incluster/wait_ready.sh | 37 ++++++ cicd/k3s-flannel-incluster/worker.sh | 11 ++ 13 files changed, 480 insertions(+) create mode 100644 cicd/k3s-flannel-incluster/Vagrantfile create mode 100755 cicd/k3s-flannel-incluster/config.sh create mode 100644 cicd/k3s-flannel-incluster/k3s.yaml create mode 100644 cicd/k3s-flannel-incluster/kube-loxilb.yml create mode 100644 cicd/k3s-flannel-incluster/loxilb.yaml create mode 100644 cicd/k3s-flannel-incluster/master.sh create mode 100644 cicd/k3s-flannel-incluster/nginx.yml create mode 100755 cicd/k3s-flannel-incluster/rmconfig.sh create mode 100644 cicd/k3s-flannel-incluster/sctp.yml create mode 100644 cicd/k3s-flannel-incluster/udp.yml create mode 100755 cicd/k3s-flannel-incluster/validation.sh create mode 100755 cicd/k3s-flannel-incluster/wait_ready.sh create mode 100644 cicd/k3s-flannel-incluster/worker.sh diff --git a/cicd/k3s-flannel-incluster/Vagrantfile b/cicd/k3s-flannel-incluster/Vagrantfile new file mode 100644 index 000000000..9b32fcf80 --- /dev/null +++ b/cicd/k3s-flannel-incluster/Vagrantfile @@ -0,0 +1,47 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +workers = (ENV['WORKERS'] || "2").to_i +#box_name = (ENV['VAGRANT_BOX'] || "ubuntu/focal64") +box_name = 
(ENV['VAGRANT_BOX'] || "sysnet4admin/Ubuntu-k8s") +box_version = "0.7.1" +Vagrant.configure("2") do |config| + config.vm.box = "#{box_name}" + config.vm.box_version = "#{box_version}" + + #config.vm.define "loxilb" do |loxilb| + # loxilb.vm.hostname = 'llb1' + #loxilb.vm.network "forwarded_port", guest: 55002, host: 5502, protocol: "tcp" + # loxilb.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0" + # loxilb.vm.network :private_network, ip: "192.168.90.9", :netmask => "255.255.255.0" + # loxilb.vm.provision :shell, :path => "loxilb.sh" + # loxilb.vm.provider :virtualbox do |vbox| + # vbox.customize ["modifyvm", :id, "--memory", 6000] + # vbox.customize ["modifyvm", :id, "--cpus", 4] + # end + #end + + config.vm.define "master" do |master| + master.vm.hostname = 'master' + master.vm.network :private_network, ip: "192.168.80.10", :netmask => "255.255.255.0" + master.vm.network :private_network, ip: "192.168.90.10", :netmask => "255.255.255.0" + master.vm.provision :shell, :path => "master.sh" + master.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 8192] + vbox.customize ["modifyvm", :id, "--cpus", 4] + end + end + + (1..workers).each do |node_number| + config.vm.define "worker#{node_number}" do |worker| + worker.vm.hostname = "worker#{node_number}" + ip = node_number + 100 + worker.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0" + worker.vm.provision :shell, :path => "worker.sh" + worker.vm.provider :virtualbox do |vbox| + vbox.customize ["modifyvm", :id, "--memory", 4096] + vbox.customize ["modifyvm", :id, "--cpus", 2] + end + end + end +end diff --git a/cicd/k3s-flannel-incluster/config.sh b/cicd/k3s-flannel-incluster/config.sh new file mode 100755 index 000000000..89381649c --- /dev/null +++ b/cicd/k3s-flannel-incluster/config.sh @@ -0,0 +1,4 @@ +#!/bin/bash +vagrant global-status | grep -i virtualbox | cut -f 1 -d ' ' | xargs -L 1 vagrant destroy -f +vagrant up +sudo ip 
route add 123.123.123.1 via 192.168.90.10 || true diff --git a/cicd/k3s-flannel-incluster/k3s.yaml b/cicd/k3s-flannel-incluster/k3s.yaml new file mode 100644 index 000000000..a77c57c56 --- /dev/null +++ b/cicd/k3s-flannel-incluster/k3s.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUyT0RrNU1EWTNOalV3SGhjTk1qTXdOekl4TURJek1qUTFXaGNOTXpNd056RTRNREl6TWpRMQpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUyT0RrNU1EWTNOalV3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFUcHh0MUlUT09RbXNSMFFJUGlYaVBCRkhBZHkvVUVaZTdBQzZEalNhcDQKZjV4Y3ZjYW9VaG1lUmM5eFRTTEpaSDdoelFHVXFVYkZjVGdIUTgzL1o0bndvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVU9VdGRvSlZxV1djR0gxZ3hUMmxGCmlkOUVMSmt3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUlnU21ucmRPS2hCWlZ0clJEZWxBc1lnUXBuZmhQdVJqdHEKRXlGS3JtKzk5TkFDSVFDcDlHbm9hNUpnQVpGSnFydFdZN1B1ZklIaDV4TmRJU1FqTkl5ZVg0WEFxUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + server: https://192.168.80.10:6443 + name: default +contexts: +- context: + cluster: default + user: default + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: default + user: + client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrVENDQVRlZ0F3SUJBZ0lJSWFMZzBLSnJGUDR3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOamc1T1RBMk56WTFNQjRYRFRJek1EY3lNVEF5TXpJME5Wb1hEVEkwTURjeQpNREF5TXpJME5Wb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJOV2NsbnhqQVNHYmI3Z2oKWUVwT0g2M2UxcEczNkRMWllvMHNQVUxrZUVDaUE0SzVLZGxSdnF6aWFGQTRjcjRrdXFoOExRcHl3UXB1ZElsMgp2Sy91V2pTalNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCUmcrVE5MZzc2Qi9DZWRBT3M5dGVFeEoxbjk4akFLQmdncWhrak9QUVFEQWdOSUFEQkYKQWlFQWlHNm50N0phaUVNY1pLc2NFMEJWTWxjcDdPM2hwOVNjRlVnSGkyL1hJclVDSUh2aWlXaDVmRGFiSERHMgpzekR6Nm9WL2VMTnFJS0ZBSGFQblJEVGdhUHY1Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkakNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUyT0RrNU1EWTNOalV3SGhjTk1qTXdOekl4TURJek1qUTFXaGNOTXpNd056RTRNREl6TWpRMQpXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUyT0RrNU1EWTNOalV3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFSUDZTeWNhdTZOSlVRMU5iTSttSk9MakVCazB4OEpzbXh1aDNITWloalIKTU5VZ1JyS0w1ZUxqTzdlTStGcjNMSWs5eGc4MTNuVDJBdFFzUE5tQmo3Yk9vMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVVlQa3pTNE8rZ2Z3bm5RRHJQYlhoCk1TZFovZkl3Q2dZSUtvWkl6ajBFQXdJRFJ3QXdSQUlnR2NsVzdadlVFZmlrQnBPQUVkUmFmNVgrMkRmTWxoaXEKUXdhb3RJQ3oydXdDSURicjNXM2xFekp3V0hyKzV1aGY2UzVHMFBMNXI1YnVEYkQ2ZW9pZHUxZ20KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSVBsdWlmRlZvTW9Tb1phTlRwWmlkcitJa2pXSFJrNGNxZVNnL3dKT3YvdkdvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFMVp5V2ZHTUJJWnR2dUNOZ1NrNGZyZDdXa2Jmb010bGlqU3c5UXVSNFFLSURncmtwMlZHKwpyT0pvVURoeXZpUzZxSHd0Q25MQkNtNTBpWGE4cis1YU5BPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= diff --git a/cicd/k3s-flannel-incluster/kube-loxilb.yml b/cicd/k3s-flannel-incluster/kube-loxilb.yml new file mode 
100644 index 000000000..5fee8ddf9 --- /dev/null +++ b/cicd/k3s-flannel-incluster/kube-loxilb.yml @@ -0,0 +1,130 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-loxilb + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - endpoints + - services + - services/status + verbs: + - get + - watch + - list + - patch + - update + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - watch + - list + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-loxilb +subjects: + - kind: ServiceAccount + name: kube-loxilb + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-loxilb + namespace: kube-system + labels: + app: loxilb +spec: + replicas: 1 + selector: + matchLabels: + app: loxilb + template: + metadata: + labels: + app: loxilb + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + tolerations: + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. 
+ - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + priorityClassName: system-node-critical + serviceAccountName: kube-loxilb + terminationGracePeriodSeconds: 0 + containers: + - name: kube-loxilb + image: ghcr.io/loxilb-io/kube-loxilb:debug + imagePullPolicy: Always + command: + - /bin/kube-loxilb + args: + #- --loxiURL=http://192.168.80.10:11111 + - --externalCIDR=123.123.123.1/24 + #- --monitor + #- --setBGP + #- --setLBMode=1 + #- --config=/opt/loxilb/agent/kube-loxilb.conf + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: true + capabilities: + add: ["NET_ADMIN", "NET_RAW"] diff --git a/cicd/k3s-flannel-incluster/loxilb.yaml b/cicd/k3s-flannel-incluster/loxilb.yaml new file mode 100644 index 000000000..4c6587bfe --- /dev/null +++ b/cicd/k3s-flannel-incluster/loxilb.yaml @@ -0,0 +1,57 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: loxilb-lb + namespace: kube-system +spec: + selector: + matchLabels: + app: loxilb-app + template: + metadata: + name: loxilb-lb + labels: + app: loxilb-app + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + tolerations: + - key: "node-role.kubernetes.io/master" + operator: Exists + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "node-role.kubernetes.io/master" + operator: Exists + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + containers: + - name: loxilb-app + image: "ghcr.io/loxilb-io/loxilb:latest" + command: [ "/root/loxilb-io/loxilb/loxilb" ] + ports: + - containerPort: 11111 + securityContext: + privileged: true + capabilities: + add: + - SYS_ADMIN +--- +apiVersion: v1 +kind: Service +metadata: + name: loxilb-lb-service + namespace: kube-system +spec: + clusterIP: None + selector: + app: loxilb-app + ports: + - name: 
loxilb-app + port: 11111 + targetPort: 11111 + protocol: TCP diff --git a/cicd/k3s-flannel-incluster/master.sh b/cicd/k3s-flannel-incluster/master.sh new file mode 100644 index 000000000..c4ae9cf3d --- /dev/null +++ b/cicd/k3s-flannel-incluster/master.sh @@ -0,0 +1,13 @@ +export MASTER_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep -v '192.168.90' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/') + +curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--disable traefik --disable servicelb --disable-cloud-controller \ +--node-ip=${MASTER_IP} --node-external-ip=${MASTER_IP} \ +--bind-address=${MASTER_IP}" sh - + +echo $MASTER_IP > /vagrant/master-ip +sudo cp /var/lib/rancher/k3s/server/node-token /vagrant/node-token +sudo cp /etc/rancher/k3s/k3s.yaml /vagrant/k3s.yaml +sudo sed -i -e "s/127.0.0.1/${MASTER_IP}/g" /vagrant/k3s.yaml +sudo kubectl apply -f /vagrant/loxilb.yaml +sudo kubectl apply -f /vagrant/kube-loxilb.yml +/vagrant/wait_ready.sh diff --git a/cicd/k3s-flannel-incluster/nginx.yml b/cicd/k3s-flannel-incluster/nginx.yml new file mode 100644 index 000000000..e6d7ccec4 --- /dev/null +++ b/cicd/k3s-flannel-incluster/nginx.yml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: nginx-lb1 + annotations: + loxilb.io/lbmode: "onearm" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: nginx-test + ports: + - port: 55002 + targetPort: 80 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: nginx-test + labels: + what: nginx-test +spec: + containers: + - name: nginx-test + image: ghcr.io/loxilb-io/nginx:stable + ports: + - containerPort: 80 diff --git a/cicd/k3s-flannel-incluster/rmconfig.sh b/cicd/k3s-flannel-incluster/rmconfig.sh new file mode 100755 index 000000000..4a6245665 --- /dev/null +++ b/cicd/k3s-flannel-incluster/rmconfig.sh @@ -0,0 +1,5 @@ +#!/bin/bash +sudo ip route del 123.123.123.1 via 192.168.90.10 || true +vagrant destroy -f worker1 +vagrant destroy -f master
+vagrant destroy -f loxilb diff --git a/cicd/k3s-flannel-incluster/sctp.yml b/cicd/k3s-flannel-incluster/sctp.yml new file mode 100644 index 000000000..292c2584e --- /dev/null +++ b/cicd/k3s-flannel-incluster/sctp.yml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: Service +metadata: + name: sctp-lb1 + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "onearm" +spec: + loadBalancerClass: loxilb.io/loxilb + externalTrafficPolicy: Local + selector: + what: sctp-test + ports: + - port: 55004 + protocol: SCTP + targetPort: 9999 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: sctp-test + labels: + what: sctp-test +spec: + containers: + - name: sctp-test + image: ghcr.io/loxilb-io/alpine-socat:latest + command: [ "sh", "-c"] + args: + - while true; do + socat -v -T2 sctp-l:9999,reuseaddr,fork system:"echo 'server1'; cat"; + sleep 20; + done; + ports: + - containerPort: 9999 + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP diff --git a/cicd/k3s-flannel-incluster/udp.yml b/cicd/k3s-flannel-incluster/udp.yml new file mode 100644 index 000000000..d56720dee --- /dev/null +++ b/cicd/k3s-flannel-incluster/udp.yml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: udp-lb1 + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "onearm" +spec: + loadBalancerClass: loxilb.io/loxilb + externalTrafficPolicy: Local + selector: + what: udp-test + ports: + - port: 55003 + protocol: UDP + targetPort: 33333 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: udp-test + labels: + what: udp-test +spec: + containers: + - name: udp-test + image: ghcr.io/loxilb-io/udp-echo:latest + ports: + - containerPort: 33333 diff --git a/cicd/k3s-flannel-incluster/validation.sh b/cicd/k3s-flannel-incluster/validation.sh new file mode 100755 index 000000000..c96f4ff18 --- /dev/null +++ b/cicd/k3s-flannel-incluster/validation.sh @@ -0,0 +1,58 @@ +#!/bin/bash +source ../common.sh +echo k3s-flannel-cluster + 
+if [ "$1" ]; then + KUBECONFIG="$1" +fi + +# Set space as the delimiter +IFS=' ' + +sleep 45 +extIP="123.123.123.1" +echo $extIP + +echo "Service Info" +vagrant ssh master -c 'sudo kubectl get svc' +echo "LB Info" +vagrant ssh loxilb -c 'sudo docker exec -i loxilb loxicmd get lb -o wide' +echo "EP Info" +vagrant ssh loxilb -c 'sudo docker exec -i loxilb loxicmd get ep -o wide' + +print_debug_info() { + echo "llb1 route-info" + vagrant ssh loxilb -c 'ip route' + vagrant ssh master -c 'sudo kubectl get pods -A' + vagrant ssh master -c 'sudo kubectl get svc' + vagrant ssh master -c 'sudo kubectl get nodes' +} + +out=$(curl -s --connect-timeout 10 http://$extIP:55002) +if [[ ${out} == *"Welcome to nginx"* ]]; then + echo "k3s-flannel-cluster (kube-loxilb) tcp [OK]" +else + echo "k3s-flannel-cluster (kube-loxilb) tcp [FAILED]" + print_debug_info + exit 1 +fi + +out=$(timeout 10 ../common/udp_client $extIP 55003) +if [[ ${out} == *"Client"* ]]; then + echo "k3s-flannel-cluster (kube-loxilb) udp [OK]" +else + echo "k3s-flannel-cluster (kube-loxilb) udp [FAILED]" + print_debug_info + exit 1 +fi + +out=$(timeout 10 ../common/sctp_client 192.168.90.1 41291 $extIP 55004) +if [[ ${out} == *"server1"* ]]; then + echo "k3s-flannel-cluster (kube-loxilb) sctp [OK]" +else + echo "k3s-flannel-cluster (kube-loxilb) sctp [FAILED]" + print_debug_info + exit 1 +fi + +exit diff --git a/cicd/k3s-flannel-incluster/wait_ready.sh b/cicd/k3s-flannel-incluster/wait_ready.sh new file mode 100755 index 000000000..5ff06e373 --- /dev/null +++ b/cicd/k3s-flannel-incluster/wait_ready.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +function wait_cluster_ready { + Res=$(sudo kubectl get pods -A | + while IFS= read -r line; do + if [[ "$line" != *"Running"* && "$line" != *"READY"* ]]; then + echo "not ready" + return + fi + done) + if [[ $Res == *"not ready"* ]]; then + return 1 + fi + return 0 +} + +function wait_cluster_ready_full { + i=1 + nr=0 + for ((;;)) do + wait_cluster_ready + nr=$? 
+ if [[ $nr == 0 ]]; then + echo "Cluster is ready" + break + fi + i=$(( $i + 1 )) + if [[ $i -ge 40 ]]; then + echo "Cluster is not ready.Giving up" + exit 1 + fi + echo "Cluster is not ready...." + sleep 10 + done +} + +wait_cluster_ready_full diff --git a/cicd/k3s-flannel-incluster/worker.sh b/cicd/k3s-flannel-incluster/worker.sh new file mode 100644 index 000000000..84986cc0c --- /dev/null +++ b/cicd/k3s-flannel-incluster/worker.sh @@ -0,0 +1,11 @@ +export WORKER_ADDR=$(ip a |grep global | grep -v '10.0.2.15' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/') +export MASTER_ADDR=$(cat /vagrant/master-ip) +export NODE_TOKEN=$(cat /vagrant/node-token) + +sudo mkdir -p /etc/rancher/k3s +sudo cp -f /vagrant/k3s.yaml /etc/rancher/k3s/k3s.yaml +curl -sfL https://get.k3s.io | K3S_URL="https://${MASTER_ADDR}:6443" K3S_TOKEN="${NODE_TOKEN}" INSTALL_K3S_EXEC="--node-ip=${WORKER_ADDR} --node-external-ip=${WORKER_ADDR}" sh - +sudo kubectl apply -f /vagrant/nginx.yml +sudo kubectl apply -f /vagrant/udp.yml +sudo kubectl apply -f /vagrant/sctp.yml +/vagrant/wait_ready.sh From 31b24a82724c12bc175c18f899086517e8377d32 Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Fri, 21 Jul 2023 23:14:36 +0900 Subject: [PATCH 2/2] kube-loxilb yaml changed for cicd runs --- cicd/k3s-sctpmh/kube-loxilb.yml | 4 +++- cicd/k8s-nat64/kube-loxilb.yaml | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/cicd/k3s-sctpmh/kube-loxilb.yml b/cicd/k3s-sctpmh/kube-loxilb.yml index 8ce7d8ed2..cd69d2fa9 100644 --- a/cicd/k3s-sctpmh/kube-loxilb.yml +++ b/cicd/k3s-sctpmh/kube-loxilb.yml @@ -114,7 +114,9 @@ spec: - --externalCIDR=123.123.123.1/24 - --externalSecondaryCIDRs=124.124.124.1/24,125.125.125.1/24 #- --monitor - - --setBGP + - --setBGP=64511 + #- --extBGPPeers=50.50.50.1:65101,51.51.51.1:65102 + #- --setRoles #- --setLBMode=2 #- --config=/opt/loxilb/agent/kube-loxilb.conf resources: diff --git a/cicd/k8s-nat64/kube-loxilb.yaml b/cicd/k8s-nat64/kube-loxilb.yaml index 
5622bfdeb..655df07dd 100644 --- a/cicd/k8s-nat64/kube-loxilb.yaml +++ b/cicd/k8s-nat64/kube-loxilb.yaml @@ -113,7 +113,7 @@ spec: - --loxiURL=http://192.168.59.101:11111,http://192.168.59.111:11111 #- --externalCIDR=123.123.123.1/24 - --externalCIDR=3ffe::1/96 - - --setBGP=true + - --setBGP=64511 - --setLBMode=2 #- --config=/opt/loxilb/agent/kube-loxilb.conf resources: