Skip to content

Commit

Permalink
Merge pull request #704 from Mirantis/ivan4th/per-node-config-e2e
Browse files Browse the repository at this point in the history
Add per-node config e2e test
  • Loading branch information
pigmej authored Jun 27, 2018
2 parents 8fe0532 + 32b3149 commit 4e1574a
Show file tree
Hide file tree
Showing 6 changed files with 313 additions and 16 deletions.
7 changes: 6 additions & 1 deletion .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -168,12 +168,17 @@ e2e: &e2e
BASE_LOCATION="$PWD" \
deploy/demo.sh
- run:
name: install kubectl
name: Install kubectl
command: |
curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl
echo ${KUBECTL_SHA1} /usr/local/bin/kubectl |sha1sum -c
chmod +x /usr/local/bin/kubectl
- run:
name: Prepare all of the nodes for Virtlet pod
command: |
build/cmd.sh prepare-all-nodes
- run:
name: Run e2e tests
command: |
Expand Down
43 changes: 29 additions & 14 deletions build/cmd.sh
Original file line number Diff line number Diff line change
Expand Up @@ -276,28 +276,39 @@ function kvm_ok {
}

function prepare_node {
local virtlet_node="${1}"
ensure_build_container
if ! docker exec "${virtlet_node}" dpkg-query -W criproxy-nodeps >&/dev/null; then
echo >&2 "Installing CRI proxy package the node container..."
docker exec "${virtlet_node}" /bin/bash -c "curl -sSL '${CRIPROXY_DEB_URL}' >/criproxy.deb && dpkg -i /criproxy.deb && rm /criproxy.deb"
local node="${1}"
if docker exec "${node}" dpkg-query -W criproxy-nodeps >&/dev/null; then
return 0
fi
ensure_build_container
echo >&2 "Installing CRI proxy package in the node container (${node})..."
docker exec "${node}" /bin/bash -c "curl -sSL '${CRIPROXY_DEB_URL}' >/criproxy.deb && dpkg -i /criproxy.deb && rm /criproxy.deb"

docker exec "${virtlet_node}" mount --make-shared /dind
docker exec "${virtlet_node}" mount --make-shared /dev
docker exec "${virtlet_node}" mount --make-shared /boot
docker exec "${virtlet_node}" mount --make-shared /sys/fs/cgroup
docker exec "${node}" mount --make-shared /dind
docker exec "${node}" mount --make-shared /dev
docker exec "${node}" mount --make-shared /boot
docker exec "${node}" mount --make-shared /sys/fs/cgroup

if [[ ${VIRTLET_ON_MASTER} ]]; then
if [[ $(kubectl get node kube-master -o jsonpath='{.spec.taints[?(@.key=="node-role.kubernetes.io/master")]}') ]]; then
kubectl taint nodes kube-master node-role.kubernetes.io/master-
fi
fi
if [[ ${FORCE_UPDATE_IMAGE} ]] || ! docker exec "${virtlet_node}" docker history -q mirantis/virtlet:latest >&/dev/null; then
if [[ ${FORCE_UPDATE_IMAGE} ]] || ! docker exec "${node}" docker history -q mirantis/virtlet:latest >&/dev/null; then
echo >&2 "Propagating Virtlet image to the node container..."
vcmd "docker save '${virtlet_image}' | docker exec -i '${virtlet_node}' docker load"
vcmd "docker save '${virtlet_image}' | docker exec -i '${node}' docker load"
fi
kubectl label node --overwrite "${virtlet_node}" extraRuntime=virtlet
}

# prepare_all_nodes readies every worker node for running the Virtlet pod
# by calling prepare_node on each of them (CRI proxy install, shared mounts,
# Virtlet image propagation). It does NOT apply the extraRuntime=virtlet
# label, so Virtlet pods are not actually started on the nodes.
function prepare_all_nodes {
  # The jsonpath filter selects all nodes except kube-master; the master
  # is handled separately (see VIRTLET_ON_MASTER handling in prepare_node).
  for node in $(kubectl get nodes -o jsonpath='{.items[?(@.metadata.name!="kube-master")].metadata.name}'); do
    prepare_node "${node}"
  done
}

# apply_runtime_label applies the extraRuntime=virtlet label to the given
# node. NOTE(review): presumably this label is what the Virtlet DaemonSet /
# CRI proxy setup keys on to target the node — confirm against the Virtlet
# deployment manifests. --overwrite makes the call idempotent.
function apply_runtime_label {
  local node="${1}"
  kubectl label node --overwrite "${node}" extraRuntime=virtlet
}

function start_dind {
Expand Down Expand Up @@ -623,12 +634,16 @@ case "${cmd}" in
;;
copy-dind-internal)
for virtlet_node in "${virtlet_nodes[@]}"; do
copy_dind_internal "${virtlet_node}"
copy_dind_internal "${virtlet_node}"
done
;;
prepare-all-nodes)
prepare_all_nodes
;;
start-dind)
for virtlet_node in "${virtlet_nodes[@]}"; do
prepare_node "${virtlet_node}"
prepare_node "${virtlet_node}"
apply_runtime_label "${virtlet_node}"
done
start_dind
;;
Expand Down
8 changes: 8 additions & 0 deletions docs/devel/build-tool.md
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,14 @@ the DIND node that will run Virtlet if it doesn't exist there
or if `FORCE_UPDATE_IMAGE` is set to a non-empty value.
This command requires `kubectl`.

### prepare-all-nodes

Makes all of the worker nodes in the `kubeadm-dind-cluster` ready to run
the Virtlet pod, but doesn't start a Virtlet pod on them (i.e. it doesn't
apply the `extraRuntime=virtlet` label). This is done automatically for the
Virtlet node during `start-dind`, but it must be done explicitly for all
of the worker nodes in order for the Virtlet per-node config e2e test to pass.

### vsh

Starts an interactive shell using build container. Useful for debugging.
Expand Down
108 changes: 108 additions & 0 deletions tests/e2e/config_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,108 @@
/*
Copyright 2018 Mirantis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
. "github.com/onsi/gomega"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

virtlet_v1 "github.com/Mirantis/virtlet/pkg/api/virtlet.k8s/v1"
"github.com/Mirantis/virtlet/tests/e2e/framework"
. "github.com/Mirantis/virtlet/tests/e2e/ginkgo-ext"
)

// Per-node configuration spec: verifies that Virtlet builds a node's
// effective configuration by combining every VirtletConfigMapping whose
// node selector matches that node's labels.
var _ = Describe("Per-node configuration", func() {
	var (
		// Name of the spare worker node that receives the Virtlet labels.
		extraNode string
		// Virtlet pod that shows up on extraNode once it is labeled.
		pod *framework.PodInterface
		// Names of the config mappings we created, kept for cleanup.
		createdMappings []string
	)

	// deployVirtletOnSpareNode picks an available worker node, labels it
	// both as a Virtlet runtime node and with the extra foobarConfig
	// selector label, then waits for the Virtlet pod to appear there.
	deployVirtletOnSpareNode := func() {
		name, err := controller.AvailableNodeName()
		Expect(err).NotTo(HaveOccurred())
		extraNode = name
		labels := map[string]string{
			"extraRuntime": "virtlet",
			"foobarConfig": "baz",
		}
		Expect(controller.AddLabelsToNode(extraNode, labels)).To(Succeed())
		pod, err = controller.WaitForVirtletPodOnTheNode(extraNode)
		Expect(err).NotTo(HaveOccurred())
	}

	// makeConfigMappings creates two VirtletConfigMapping objects: one
	// selecting on extraRuntime=virtlet (sets the log level) and one
	// selecting on foobarConfig=baz (sets the raw device pattern). A node
	// carrying both labels should end up with both settings.
	makeConfigMappings := func() {
		level := 5
		rawPattern := "foobar*"
		mappings := []virtlet_v1.VirtletConfigMapping{
			{
				ObjectMeta: meta_v1.ObjectMeta{
					GenerateName: "virtlet-e2e-",
				},
				Spec: virtlet_v1.VirtletConfigMappingSpec{
					NodeSelector: map[string]string{
						"extraRuntime": "virtlet",
					},
					Config: &virtlet_v1.VirtletConfig{
						LogLevel: &level,
					},
				},
			},
			{
				ObjectMeta: meta_v1.ObjectMeta{
					GenerateName: "virtlet-e2e-",
				},
				Spec: virtlet_v1.VirtletConfigMappingSpec{
					NodeSelector: map[string]string{
						"foobarConfig": "baz",
					},
					Config: &virtlet_v1.VirtletConfig{
						RawDevices: &rawPattern,
					},
				},
			},
		}
		for _, m := range mappings {
			created, err := controller.CreateVirtletConfigMapping(m)
			Expect(err).NotTo(HaveOccurred())
			// Record the server-generated name so AfterAll can delete it.
			if created != nil {
				createdMappings = append(createdMappings, created.Name)
			}
		}
	}

	AfterAll(func() {
		// Undo the labeling first so the Virtlet pod is torn down, then
		// remove the config mappings created by the spec.
		if extraNode != "" {
			removeLabels := []string{
				"extraRuntime",
				"foobarConfig",
			}
			Expect(controller.RemoveLabelOffNode(extraNode, removeLabels)).To(Succeed())
			Expect(controller.WaitForVirtletPodToDisappearFromTheNode(extraNode)).To(Succeed())
		}
		for _, name := range createdMappings {
			Expect(controller.DeleteVirtletConfigMapping(name)).To(Succeed())
		}
	})

	It("Should be obtained by combining the Virtlet config mappings that match the node", func() {
		makeConfigMappings()
		deployVirtletOnSpareNode()
		// The effective per-node config materializes as env exports in
		// config.sh inside the virtlet container; check both settings landed.
		container, err := pod.Container("virtlet")
		Expect(err).NotTo(HaveOccurred())
		out, err := framework.RunSimple(container, "cat", "/var/lib/virtlet/config.sh")
		Expect(err).NotTo(HaveOccurred())
		Expect(out).To(ContainSubstring("export VIRTLET_RAW_DEVICES=foobar\\*\n"))
		Expect(out).To(ContainSubstring("export VIRTLET_LOGLEVEL=5"))
	})
})
Loading

0 comments on commit 4e1574a

Please sign in to comment.