This example shows vl3 network recovery after redeploying all clients.
Deploy the NSCs and vl3 NSEs:
kubectl apply -k https://github.com/networkservicemesh/deployments-k8s/examples/heal/vl3-nscs-death?ref=aad7c26ad32fb4c3b515179bbe85d59c811c52f1
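Optionally, list the pods created by the kustomization to watch the clients and vl3 NSEs come up (pod names vary per deployment):
kubectl get pods -n ns-vl3-nscs-death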
Wait for clients to be ready:
kubectl wait -n ns-vl3-nscs-death --for=condition=ready --timeout=1m pod -l app=alpine
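If the vl3 NSEs should also be covered by the readiness check, a similar wait can be added; this sketch assumes the NSE pods carry the app=nse-vl3-vpp label used by the deployments-k8s vl3 apps:
kubectl wait -n ns-vl3-nscs-death --for=condition=ready --timeout=1m pod -l app=nse-vl3-vpp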
Find all NSCs:
nscs=$(kubectl get pods -l app=alpine -o go-template --template="{{range .items}}{{.metadata.name}} {{end}}" -n ns-vl3-nscs-death)
[[ ! -z $nscs ]]
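The test on the last line asserts that at least one client pod was found; printing the variable shows the discovered names:
echo $nscs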
Ping each client from every client:
(
for nsc in $nscs
do
    # Resolve the vl3 address assigned to this client's nsm-1 interface
    ipAddr=$(kubectl exec -n ns-vl3-nscs-death $nsc -- ifconfig nsm-1) || exit
    ipAddr=$(echo $ipAddr | grep -Eo 'inet addr:[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'| cut -c 11-)
    # Every client (including this one) must be able to ping that address
    for pinger in $nscs
    do
        echo $pinger pings $ipAddr
        kubectl exec $pinger -n ns-vl3-nscs-death -- ping -c2 -i 0.5 $ipAddr || exit
    done
done
)
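The two ipAddr lines parse the BusyBox-style ifconfig output of the nsm-1 interface: grep isolates the inet addr:<IPv4> token and cut drops the 10-character inet addr: prefix. A standalone sketch with a made-up address shows the extraction:
echo 'inet addr:172.16.0.2  Bcast:0.0.0.0  Mask:255.255.255.0' | grep -Eo 'inet addr:[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | cut -c 11-
# prints 172.16.0.2 (hypothetical address used only for illustration)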
Scale NSCs to zero:
kubectl scale -n ns-vl3-nscs-death deployment alpine --replicas=0
kubectl wait -n ns-vl3-nscs-death --for=delete --timeout=1m pod -l app=alpine
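Optionally confirm that no client pods remain before rescaling:
kubectl get pods -n ns-vl3-nscs-death -l app=alpine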
Scale the NSCs back up to two replicas:
kubectl scale -n ns-vl3-nscs-death deployment alpine --replicas=2
kubectl wait -n ns-vl3-nscs-death --for=condition=ready --timeout=1m pod -l app=alpine
Find all the new NSCs and run the ping check again:
nscs=$(kubectl get pods -l app=alpine -o go-template --template="{{range .items}}{{.metadata.name}} {{end}}" -n ns-vl3-nscs-death)
[[ ! -z $nscs ]]
(
for nsc in $nscs
do
    ipAddr=$(kubectl exec -n ns-vl3-nscs-death $nsc -- ifconfig nsm-1) || exit
    ipAddr=$(echo $ipAddr | grep -Eo 'inet addr:[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'| cut -c 11-)
    for pinger in $nscs
    do
        echo $pinger pings $ipAddr
        kubectl exec $pinger -n ns-vl3-nscs-death -- ping -c2 -i 0.5 $ipAddr || exit
    done
done
)
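To see which vl3 addresses the redeployed clients received (they may differ from the addresses before the restart), the nsm-1 interfaces can be inspected directly:
(
for nsc in $nscs
do
    echo "$nsc:"
    kubectl exec -n ns-vl3-nscs-death $nsc -- ifconfig nsm-1 | grep 'inet addr'
done
)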
To clean up the example, delete the namespace:
kubectl delete ns ns-vl3-nscs-death
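Namespace deletion is asynchronous; to block until it has completed, a wait can be added:
kubectl wait --for=delete --timeout=1m ns ns-vl3-nscs-death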