Skip to content

Commit

Permalink
update checks and tests after module args update
Browse files Browse the repository at this point in the history
  • Loading branch information
juanvallejo committed Jul 27, 2017
1 parent 1e11488 commit c0591d3
Show file tree
Hide file tree
Showing 9 changed files with 55 additions and 80 deletions.
7 changes: 4 additions & 3 deletions roles/openshift_health_checker/openshift_checks/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ def get_var(self, *keys, **kwargs):
return value

@staticmethod
def get_openshift_version(openshift_image_tag):
def get_major_minor_version(openshift_image_tag):
"""Parse and return the deployed version of OpenShift as a tuple."""
if openshift_image_tag and openshift_image_tag[0] == 'v':
openshift_image_tag = openshift_image_tag[1:]
Expand All @@ -125,8 +125,9 @@ def get_openshift_version(openshift_image_tag):
if components[0] in openshift_major_release_version:
components[0] = openshift_major_release_version[components[0]]

components = [int(x) for x in components[:2]]
return tuple(components)
components = tuple(int(x) for x in components[:2])
return components


LOADER_EXCLUDES = (
"__init__.py",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -198,4 +198,3 @@ def _check_elasticsearch_diskspace(self, pods_by_name):
))

return error_msgs

Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
Module for performing checks on a Fluentd logging deployment configuration
"""

from openshift_checks import get_var, OpenShiftCheckException
from openshift_checks import OpenShiftCheckException
from openshift_checks.logging.logging import LoggingCheck


Expand All @@ -15,25 +15,25 @@ def is_active(self):
logging_deployed = self.get_var("openshift_hosted_logging_deploy", default=False)

try:
version = self.get_openshift_version(self.get_var("openshift_image_tag"))
version = self.get_major_minor_version(self.get_var("openshift_image_tag"))
except ValueError:
# if failed to parse OpenShift version, perform check anyway (if logging enabled)
return logging_deployed

return logging_deployed and version < (3, 6)

def run(self, tmp, task_vars):
def run(self):
"""Check that Fluentd has running pods, and that its logging config matches Docker's logging config."""
self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default=self.logging_namespace)
config_error = self.check_logging_config(task_vars)
self.logging_namespace = self.get_var("openshift_logging_namespace", default=self.logging_namespace)
config_error = self.check_logging_config()
if config_error:
msg = ("The following Fluentd logging configuration problem was found:"
"\n{}".format(config_error))
return {"failed": True, "msg": msg}

return {}

def check_logging_config(self, task_vars):
def check_logging_config(self):
"""Ensure that the configured Docker logging driver matches fluentd settings.
This means that, at least for now, if the following condition is met:
Expand All @@ -42,15 +42,15 @@ def check_logging_config(self, task_vars):
then the value of the configured Docker logging driver should be "journald".
Otherwise, the value of the Docker logging driver should be "json-file".
Returns an error string if the above condition is not met, or None otherwise."""
use_journald = get_var(task_vars, "openshift_logging_fluentd_use_journal", default=True)
use_journald = self.get_var("openshift_logging_fluentd_use_journal", default=True)

# if check is running on a master, retrieve all running pods
# and check any pod's container for the env var "USE_JOURNAL"
group_names = get_var(task_vars, "group_names")
group_names = self.get_var("group_names")
if "masters" in group_names:
use_journald = self.check_fluentd_env_var(task_vars)
use_journald = self.check_fluentd_env_var()

docker_info = self.execute_module("docker_info", {}, task_vars)
docker_info = self.execute_module("docker_info", {})
try:
logging_driver = docker_info["info"]["LoggingDriver"]
except KeyError:
Expand Down Expand Up @@ -92,9 +92,9 @@ def check_logging_config(self, task_vars):

return error

def check_fluentd_env_var(self, task_vars):
def check_fluentd_env_var(self):
"""Read and return the value of the 'USE_JOURNAL' environment variable on a fluentd pod."""
running_pods = self.running_fluentd_pods(task_vars)
running_pods = self.running_fluentd_pods()

try:
pod_containers = running_pods[0]["spec"]["containers"]
Expand All @@ -118,13 +118,11 @@ def check_fluentd_env_var(self, task_vars):

return False

def running_fluentd_pods(self, task_vars):
def running_fluentd_pods(self):
"""Return a list of running fluentd pods."""
fluentd_pods, error = self.get_pods_for_component(
self.execute_module,
self.logging_namespace,
"fluentd",
task_vars,
)
if error:
msg = 'Unable to retrieve any pods for the "fluentd" logging component: {}'.format(error)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ class Kibana(LoggingCheck):
name = "kibana"
tags = ["health", "logging"]


def run(self):
"""Check various things and gather errors. Returns: result as hash"""

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def run(self):

def get_required_ovs_version(self):
"""Return the correct Open vSwitch version for the current OpenShift version"""
openshift_version_tuple = self.get_openshift_version(self.get_var("openshift_image_tag"))
openshift_version_tuple = self.get_major_minor_version(self.get_var("openshift_image_tag"))

if openshift_version_tuple < (3, 5):
return self.openshift_to_ovs_version["3.4"]
Expand Down
29 changes: 13 additions & 16 deletions roles/openshift_health_checker/test/elasticsearch_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,14 +6,6 @@
task_vars_config_base = dict(openshift=dict(common=dict(config_base='/etc/origin')))


def canned_elasticsearch(task_vars=None, exec_oc=None):
"""Create an Elasticsearch check object with canned exec_oc method"""
check = Elasticsearch("dummy", task_vars or {}) # fails if a module is actually invoked
if exec_oc:
check._exec_oc = exec_oc
return check


def assert_error(error, expect_error):
if expect_error:
assert error
Expand Down Expand Up @@ -50,10 +42,10 @@ def assert_error(error, expect_error):


def test_check_elasticsearch():
assert 'No logging Elasticsearch pods' in canned_elasticsearch().check_elasticsearch([])
assert 'No logging Elasticsearch pods' in Elasticsearch().check_elasticsearch([])

# canned oc responses to match so all the checks pass
def _exec_oc(cmd, args):
def _exec_oc(ns, cmd, args):
if '_cat/master' in cmd:
return 'name logging-es'
elif '/_nodes' in cmd:
Expand All @@ -65,7 +57,9 @@ def _exec_oc(cmd, args):
else:
raise Exception(cmd)

assert not canned_elasticsearch({}, _exec_oc).check_elasticsearch([plain_es_pod])
check = Elasticsearch(None, {})
check.exec_oc = _exec_oc
assert not check.check_elasticsearch([plain_es_pod])


def pods_by_name(pods):
Expand All @@ -88,8 +82,8 @@ def pods_by_name(pods):
])
def test_check_elasticsearch_masters(pods, expect_error):
    """Verify master-name validation across the parametrized pod scenarios.

    The stale `canned_elasticsearch(...)` assignment from before the module-args
    refactor is removed: that helper no longer exists, and the assignment was
    immediately overwritten by the `Elasticsearch(None, ...)` construction anyway.
    """
    test_pods = list(pods)
    check = Elasticsearch(None, task_vars_config_base)
    # Each execute_module call pops the next canned master-name response,
    # simulating one `_cat/master` query per pod.
    check.execute_module = lambda cmd, args: {'result': test_pods.pop(0)['_test_master_name_str']}
    errors = check._check_elasticsearch_masters(pods_by_name(pods))
    assert_error(''.join(errors), expect_error)

Expand Down Expand Up @@ -124,7 +118,8 @@ def test_check_elasticsearch_masters(pods, expect_error):
),
])
def test_check_elasticsearch_node_list(pods, node_list, expect_error):
    """Verify that the ES nodes registered in the cluster match the pod list.

    Drops the stale `canned_elasticsearch(...)` line left over from before the
    module-args refactor — the helper was deleted, and the assignment was dead
    (immediately overwritten) in any case.
    """
    check = Elasticsearch(None, task_vars_config_base)
    # Canned module response: the cluster's /_nodes output as a JSON string.
    check.execute_module = lambda cmd, args: {'result': json.dumps(node_list)}

    errors = check._check_elasticsearch_node_list(pods_by_name(pods))
    assert_error(''.join(errors), expect_error)
Expand All @@ -149,7 +144,8 @@ def test_check_elasticsearch_node_list(pods, node_list, expect_error):
])
def test_check_elasticsearch_cluster_health(pods, health_data, expect_error):
    """Verify cluster-health validation against canned _cluster/health replies.

    Removes the stale `canned_elasticsearch(...)` assignment from before the
    module-args refactor: the helper no longer exists and the value was
    immediately overwritten by the `Elasticsearch(None, ...)` construction.
    """
    test_health_data = list(health_data)
    check = Elasticsearch(None, task_vars_config_base)
    # Pop one canned health document per execute_module call, so each pod
    # queried gets the next response in sequence.
    check.execute_module = lambda cmd, args: {'result': json.dumps(test_health_data.pop(0))}

    errors = check._check_es_cluster_health(pods_by_name(pods))
    assert_error(''.join(errors), expect_error)
Expand All @@ -174,7 +170,8 @@ def test_check_elasticsearch_cluster_health(pods, health_data, expect_error):
),
])
def test_check_elasticsearch_diskspace(disk_data, expect_error):
    """Verify disk-space validation using a canned `df` command output.

    The stale `canned_elasticsearch(...)` line from before the module-args
    refactor is removed — the helper was deleted, and the assignment was dead
    (immediately overwritten) regardless.
    """
    check = Elasticsearch(None, task_vars_config_base)
    # Canned module response: raw `df` output for the ES storage volume.
    check.execute_module = lambda cmd, args: {'result': disk_data}

    errors = check._check_elasticsearch_diskspace(pods_by_name([plain_es_pod]))
    assert_error(''.join(errors), expect_error)
42 changes: 18 additions & 24 deletions roles/openshift_health_checker/test/fluentd_config_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,6 @@
from openshift_checks.logging.fluentd_config import FluentdConfig, OpenShiftCheckException


@pytest.fixture
def fluentd_config():
return FluentdConfig("dummy")


def canned_fluentd_pod(containers):
return {
"metadata": {
Expand Down Expand Up @@ -76,7 +71,7 @@ def canned_fluentd_pod(containers):
),
], ids=lambda argvals: argvals[0])
def test_check_logging_config_non_master(name, use_journald, logging_driver, extra_words):
def execute_module(module_name, args, task_vars):
def execute_module(module_name, args):
if module_name == "docker_info":
return {
"info": {
Expand All @@ -94,9 +89,9 @@ def execute_module(module_name, args, task_vars):
),
)

check = fluentd_config()
check = FluentdConfig(execute_module, task_vars)
check.execute_module = execute_module
error = check.check_logging_config(task_vars)
error = check.check_logging_config()

assert error is None

Expand All @@ -122,7 +117,7 @@ def execute_module(module_name, args, task_vars):
),
], ids=lambda argvals: argvals[0])
def test_check_logging_config_non_master_failed(name, use_journald, logging_driver, words):
def execute_module(module_name, args, task_vars):
def execute_module(module_name, args):
if module_name == "docker_info":
return {
"info": {
Expand All @@ -140,9 +135,9 @@ def execute_module(module_name, args, task_vars):
),
)

check = fluentd_config()
check = FluentdConfig(execute_module, task_vars)
check.execute_module = execute_module
error = check.check_logging_config(task_vars)
error = check.check_logging_config()

assert error is not None
for word in words:
Expand Down Expand Up @@ -186,7 +181,7 @@ def execute_module(module_name, args, task_vars):
),
], ids=lambda argvals: argvals[0])
def test_check_logging_config_master(name, pods, logging_driver, extra_words):
def execute_module(module_name, args, task_vars):
def execute_module(module_name, args):
if module_name == "docker_info":
return {
"info": {
Expand All @@ -203,13 +198,13 @@ def execute_module(module_name, args, task_vars):
),
)

def get_pods(execute_module, namespace, logging_component, task_vars):
def get_pods(namespace, logging_component):
return pods, None

check = fluentd_config()
check = FluentdConfig(execute_module, task_vars)
check.execute_module = execute_module
check.get_pods_for_component = get_pods
error = check.check_logging_config(task_vars)
error = check.check_logging_config()

assert error is None

Expand Down Expand Up @@ -271,7 +266,7 @@ def get_pods(execute_module, namespace, logging_component, task_vars):
),
], ids=lambda argvals: argvals[0])
def test_check_logging_config_master_failed(name, pods, logging_driver, words):
def execute_module(module_name, args, task_vars):
def execute_module(module_name, args):
if module_name == "docker_info":
return {
"info": {
Expand All @@ -288,13 +283,13 @@ def execute_module(module_name, args, task_vars):
),
)

def get_pods(execute_module, namespace, logging_component, task_vars):
def get_pods(namespace, logging_component):
return pods, None

check = fluentd_config()
check = FluentdConfig(execute_module, task_vars)
check.execute_module = execute_module
check.get_pods_for_component = get_pods
error = check.check_logging_config(task_vars)
error = check.check_logging_config()

assert error is not None
for word in words:
Expand Down Expand Up @@ -331,7 +326,7 @@ def get_pods(execute_module, namespace, logging_component, task_vars):
),
], ids=lambda argvals: argvals[0])
def test_check_logging_config_master_fails_on_unscheduled_deployment(name, pods, response, logging_driver, extra_words):
def execute_module(module_name, args, task_vars):
def execute_module(module_name, args):
if module_name == "docker_info":
return {
"info": {
Expand All @@ -348,15 +343,14 @@ def execute_module(module_name, args, task_vars):
),
)

def get_pods(execute_module, namespace, logging_component, task_vars):
def get_pods(namespace, logging_component):
return pods, None

check = fluentd_config()
check.execute_module = execute_module
check = FluentdConfig(execute_module, task_vars)
check.get_pods_for_component = get_pods

with pytest.raises(OpenShiftCheckException) as error:
check.check_logging_config(task_vars)
check.check_logging_config()

assert error is not None
for word in extra_words:
Expand Down
12 changes: 2 additions & 10 deletions roles/openshift_health_checker/test/fluentd_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,6 @@
from openshift_checks.logging.fluentd import Fluentd


def canned_fluentd(exec_oc=None):
"""Create a Fluentd check object with canned exec_oc method"""
check = Fluentd("dummy") # fails if a module is actually invoked
if exec_oc:
check._exec_oc = exec_oc
return check


def assert_error(error, expect_error):
if expect_error:
assert error
Expand Down Expand Up @@ -103,7 +95,7 @@ def assert_error(error, expect_error):
),
])
def test_get_fluentd_pods(pods, nodes, expect_error):
    """Verify fluentd pod/node matching across the parametrized scenarios.

    Drops the stale `canned_fluentd(...)` assignment left from before the
    module-args refactor: that helper was deleted, and the value was
    immediately overwritten by the `Fluentd()` construction anyway.
    """
    check = Fluentd()
    # Canned oc response: the cluster node list rendered as JSON, matching
    # the new (ns, cmd, args) exec_oc signature.
    check.exec_oc = lambda ns, cmd, args: json.dumps(dict(items=nodes))
    error = check.check_fluentd(pods)
    assert_error(error, expect_error)
Loading

0 comments on commit c0591d3

Please sign in to comment.