From 979da3388a2cd7e70c9a9391d4e1fc6f9eb86e0b Mon Sep 17 00:00:00 2001
From: Katrina Verey
Date: Thu, 11 May 2017 20:12:13 -0400
Subject: [PATCH] Rework deploy output

---
 lib/kubernetes-deploy.rb                     |   1 +
 .../deferred_summary_logging.rb              |  87 +++++++++
 .../ejson_secret_provisioner.rb              |   4 +
 lib/kubernetes-deploy/formatted_logger.rb    |  13 +-
 lib/kubernetes-deploy/kubernetes_resource.rb |  97 ++++++++--
 .../kubernetes_resource/bugsnag.rb           |   1 -
 .../kubernetes_resource/cloudsql.rb          |   8 +-
 .../kubernetes_resource/config_map.rb        |   5 +-
 .../kubernetes_resource/deployment.rb        |  25 ++-
 .../kubernetes_resource/ingress.rb           |   5 -
 .../persistent_volume_claim.rb               |   4 +-
 .../kubernetes_resource/pod.rb               |  67 ++++---
 .../pod_disruption_budget.rb                 |   5 +-
 .../kubernetes_resource/pod_template.rb      |   5 +-
 .../kubernetes_resource/redis.rb             |   9 +-
 .../kubernetes_resource/service.rb           |   8 +-
 lib/kubernetes-deploy/resource_watcher.rb    |  27 ++-
 lib/kubernetes-deploy/restart_task.rb        |   7 +-
 lib/kubernetes-deploy/runner.rb              | 176 ++++++++++++------
 lib/kubernetes-deploy/runner_task.rb         |  11 +-
 lib/kubernetes-deploy/ui_helpers.rb          |  15 --
 test/integration/kubernetes_deploy_test.rb   | 111 +++++++++--
 test/test_helper.rb                          |  14 +-
 .../resource_watcher_test.rb                 |  25 +--
 24 files changed, 550 insertions(+), 180 deletions(-)
 create mode 100644 lib/kubernetes-deploy/deferred_summary_logging.rb
 delete mode 100644 lib/kubernetes-deploy/ui_helpers.rb

diff --git a/lib/kubernetes-deploy.rb b/lib/kubernetes-deploy.rb
index 84ef9b1e0..1399503b1 100644
--- a/lib/kubernetes-deploy.rb
+++ b/lib/kubernetes-deploy.rb
@@ -5,6 +5,7 @@
 require 'active_support/core_ext/string/inflections'
 require 'active_support/core_ext/string/strip'
 require 'active_support/core_ext/hash/keys'
+require 'active_support/core_ext/array/conversions'
 require 'colorized_string'
 
 require 'kubernetes-deploy/errors'
diff --git a/lib/kubernetes-deploy/deferred_summary_logging.rb b/lib/kubernetes-deploy/deferred_summary_logging.rb
new file mode 100644
index 000000000..921ee6460
--- /dev/null
+++ b/lib/kubernetes-deploy/deferred_summary_logging.rb
@@ -0,0 +1,87 @@
+module KubernetesDeploy
+  # Adds the methods kubernetes-deploy requires to your logger class.
+  # These methods include helpers for logging consistent headings, as well as facilities for
+  # displaying key information later, in a summary section, rather than when it occurred.
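+  #
+  # Illustrative usage (editor's sketch, not part of the original change; it only uses
+  # methods defined in this module and the FormattedLogger.build constructor below):
+  #   logger = KubernetesDeploy::FormattedLogger.build(namespace, context)
+  #   logger.phase_heading("Checking initial resource statuses")
+  #   logger.summary.add_action("deployed 3 resources")
+  #   logger.summary.add_paragraph("Debug details for a failed resource")
+  #   logger.print_summary(true) # prints "Deployed 3 resources" followed by each paragraph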
+  module DeferredSummaryLogging
+    attr_reader :summary
+    def initialize(*args)
+      reset
+      super
+    end
+
+    def reset
+      @summary = DeferredSummary.new
+      @current_phase = 0
+    end
+
+    def blank_line(level = :info)
+      public_send(level, "")
+    end
+
+    def phase_heading(phase_name)
+      @current_phase += 1
+      heading("Phase #{@current_phase}: #{phase_name}")
+    end
+
+    def heading(text, secondary_msg = '', secondary_msg_color = :blue)
+      padding = (100.0 - (text.length + secondary_msg.length)) / 2
+      blank_line
+      part1 = ColorizedString.new("#{'-' * padding.floor}#{text}").blue
+      part2 = ColorizedString.new(secondary_msg).colorize(secondary_msg_color)
+      part3 = ColorizedString.new('-' * padding.ceil).blue
+      info(part1 + part2 + part3)
+    end
+
+    # Outputs the deferred summary information saved via @logger.summary.add_action and @logger.summary.add_paragraph
+    def print_summary(success)
+      if success
+        heading("Result: ", "SUCCESS", :green)
+        level = :info
+      else
+        heading("Result: ", "FAILURE", :red)
+        level = :fatal
+      end
+
+      public_send(level, summary.actions_sentence)
+      summary.paragraphs.each do |para|
+        blank_line(level)
+        msg_lines = para.split("\n")
+        msg_lines.each { |line| public_send(level, line) }
+      end
+    end
+
+    class DeferredSummary
+      attr_reader :paragraphs
+
+      def initialize
+        @actions_taken = []
+        @paragraphs = []
+      end
+
+      def actions_sentence
+        case @actions_taken.length
+        when 0 then "No actions taken"
+        else
+          @actions_taken.to_sentence.capitalize
+        end
+      end
+
+      # Saves a sentence fragment to be displayed in the first sentence of the summary section
+      #
+      # Example:
+      # # The resulting summary will begin with "Created 3 secrets and failed to deploy 2 resources"
+      # @logger.summary.add_action("created 3 secrets")
+      # @logger.summary.add_action("failed to deploy 2 resources")
+      def add_action(sentence_fragment)
+        @actions_taken << sentence_fragment
+      end
+
+      # Adds a paragraph to be displayed in the summary section
+      # Paragraphs will be printed in the order they were added, separated by a blank line
+      # This can be used to log a block of data on a particular topic, e.g. debug info for a particular failed resource
+      def add_paragraph(paragraph)
+        paragraphs << paragraph
+      end
+    end
+  end
+end
diff --git a/lib/kubernetes-deploy/ejson_secret_provisioner.rb b/lib/kubernetes-deploy/ejson_secret_provisioner.rb
index 9307cd3db..1255cfa46 100644
--- a/lib/kubernetes-deploy/ejson_secret_provisioner.rb
+++ b/lib/kubernetes-deploy/ejson_secret_provisioner.rb
@@ -49,6 +49,7 @@ def create_secrets
         validate_secret_spec(secret_name, secret_spec)
         create_or_update_secret(secret_name, secret_spec["_type"], secret_spec["data"])
       end
+      @logger.summary.add_action("created/updated #{secrets.length} #{'secret'.pluralize(secrets.length)}")
     end
   end
 
@@ -56,16 +57,19 @@ def prune_managed_secrets
     ejson_secret_names = encrypted_ejson.fetch(MANAGED_SECRET_EJSON_KEY, {}).keys
     live_secrets = run_kubectl_json("get", "secrets")
 
+    prune_count = 0
     live_secrets.each do |secret|
       secret_name = secret["metadata"]["name"]
       next unless secret_managed?(secret)
       next if ejson_secret_names.include?(secret_name)
 
       @logger.info("Pruning secret #{secret_name}")
+      prune_count += 1
      out, err, st = @kubectl.run("delete", "secret", secret_name)
      @logger.debug(out)
      raise EjsonSecretError, err unless st.success?
    end
+    @logger.summary.add_action("pruned #{prune_count} #{'secret'.pluralize(prune_count)}") if prune_count > 0
  end

  def managed_secrets_exist?
diff --git a/lib/kubernetes-deploy/formatted_logger.rb b/lib/kubernetes-deploy/formatted_logger.rb
index 41280b8d4..934789cf5 100644
--- a/lib/kubernetes-deploy/formatted_logger.rb
+++ b/lib/kubernetes-deploy/formatted_logger.rb
@@ -1,8 +1,11 @@
 # frozen_string_literal: true
 require 'logger'
+require 'kubernetes-deploy/deferred_summary_logging'
 
 module KubernetesDeploy
   class FormattedLogger < Logger
+    include DeferredSummaryLogging
+
     def self.build(namespace, context, stream = $stderr, verbose_prefix: false)
       l = new(stream)
       l.level = level_from_env
@@ -13,11 +16,11 @@ def self.build(namespace, context, stream = $stderr, verbose_prefix: false)
         case severity
         when "FATAL"
+          ColorizedString.new("[#{severity}][#{datetime}]#{middle}\t").red + "#{msg}\n"
+        when "ERROR"
           colorized_line.red
-        when "ERROR", "WARN"
+        when "WARN"
           colorized_line.yellow
-        when "INFO"
-          msg =~ /^\[(KUBESTATUS|Pod)/ ? colorized_line : colorized_line.blue
         else
           colorized_line
         end
@@ -35,9 +38,5 @@ def self.level_from_env
       end
     end
     private_class_method :level_from_env
-
-    def blank_line(level = :info)
-      public_send(level, "")
-    end
   end
 end
diff --git a/lib/kubernetes-deploy/kubernetes_resource.rb b/lib/kubernetes-deploy/kubernetes_resource.rb
index 7f8c85282..9820a0800 100644
--- a/lib/kubernetes-deploy/kubernetes_resource.rb
+++ b/lib/kubernetes-deploy/kubernetes_resource.rb
@@ -1,3 +1,4 @@
+# frozen_string_literal: true
 require 'json'
 require 'open3'
 require 'shellwords'
@@ -10,6 +11,16 @@ class KubernetesResource
     TIMEOUT = 5.minutes
 
+    DEBUG_RESOURCE_NOT_FOUND_MESSAGE = "None found. Please check your usual logging service (e.g. Splunk)."
+    UNUSUAL_FAILURE_MESSAGE = <<-MSG.strip_heredoc
+      It is very unusual for this resource type to fail to deploy. Please try the deploy again.
+      If that new deploy also fails, contact your cluster administrator.
+    MSG
+    STANDARD_TIMEOUT_MESSAGE = <<-MSG.strip_heredoc
+      Kubernetes will continue to attempt to deploy this resource in the cluster, but at this point it is considered unlikely that it will succeed.
+      If you have reason to believe it will succeed, retry the deploy to continue to monitor the rollout.
+    MSG
+
     def self.for_type(type:, name:, namespace:, context:, file:, logger:)
       subclass = case type
       when 'cloudsql' then Cloudsql
@@ -56,7 +67,6 @@ def id
     end
 
     def sync
-      log_status
     end
 
     def deploy_failed?
@@ -77,7 +87,6 @@ def exists?
     end
 
     def status
       @status ||= "Unknown"
-      deploy_timed_out? ? "Timed out with status #{@status}" : @status
     end
 
     def type
@@ -103,24 +112,80 @@ def deploy_method
       tpr? ? :replace : :apply
     end
 
-    def status_data
-      {
-        group: group_name,
-        name: name,
-        status_string: status,
-        exists: exists?,
-        succeeded: deploy_succeeded?,
-        failed: deploy_failed?,
-        timed_out: deploy_timed_out?
-      }
+    def debug_message
+      helpful_info = []
+      if deploy_failed?
+        helpful_info << ColorizedString.new("#{id}: FAILED").red
+        helpful_info << failure_message if failure_message.present?
+      else
+        helpful_info << ColorizedString.new("#{id}: TIMED OUT").yellow + " (limit: #{timeout}s)"
+        helpful_info << timeout_message if timeout_message.present?
+      end
+      helpful_info << " - Final status: #{status}"
+
+      events = fetch_events
+      if events.present?
+        helpful_info << " - Events:"
+        events.each do |identifier, event_hashes|
+          event_hashes.each { |event| helpful_info << " [#{identifier}]\t#{event}" }
+        end
+      else
+        helpful_info << " - Events: #{DEBUG_RESOURCE_NOT_FOUND_MESSAGE}"
+      end
+
+      if respond_to?(:fetch_logs)
+        container_logs = fetch_logs
+        if container_logs.blank? || container_logs.values.all?(&:blank?)
+          helpful_info << " - Logs: #{DEBUG_RESOURCE_NOT_FOUND_MESSAGE}"
+        else
+          helpful_info << " - Logs:"
+          container_logs.each do |identifier, logs|
+            logs.split("\n").each do |line|
+              helpful_info << " [#{identifier}]\t#{line}"
+            end
+          end
+        end
+      end
+
+      helpful_info.join("\n")
+    end
+
+    # Returns a hash in the following format:
+    # {
+    #   "pod/web-1" => [
+    #     {"subject_kind" => "Pod", "some" => "stuff"}, # event 1
+    #     {"subject_kind" => "Pod", "other" => "stuff"}, # event 2
+    #   ]
+    # }
+    def fetch_events
+      return {} unless exists?
+      fields = "{.involvedObject.kind}\t{.count}\t{.message}\t{.reason}"
+      out, _err, st = kubectl.run("get", "events",
+        %(--output=jsonpath={range .items[?(@.involvedObject.name=="#{name}")]}#{fields}\n{end}))
+      return {} unless st.success?
+
+      event_collector = Hash.new { |hash, key| hash[key] = [] }
+      out.split("\n").each_with_object(event_collector) do |event_blob, events|
+        pieces = event_blob.split("\t")
+        subject_kind = pieces[0]
+        # jsonpath can only filter by one thing at a time, and we chose involvedObject.name
+        # This means we still need to filter by involvedObject.kind here to make sure we only retain relevant events
+        next unless subject_kind.downcase == type.downcase
+        events[id] << "#{pieces[3] || 'Unknown'}: #{pieces[2]} (#{pieces[1]} events)" # Reason: Message (X events)
+      end
+    end
+
+    def timeout_message
+      STANDARD_TIMEOUT_MESSAGE
     end
 
-    def group_name
-      type.downcase.pluralize
+    def failure_message
     end
 
-    def log_status
-      @logger.info("[KUBESTATUS] #{JSON.dump(status_data)}")
+    def pretty_status
+      padding = " " * (50 - id.length)
+      msg = exists? ? status : "not found"
+      "#{id}#{padding}#{msg}"
     end
 
     def kubectl
diff --git a/lib/kubernetes-deploy/kubernetes_resource/bugsnag.rb b/lib/kubernetes-deploy/kubernetes_resource/bugsnag.rb
index 2f4b7f5da..94e655045 100644
--- a/lib/kubernetes-deploy/kubernetes_resource/bugsnag.rb
+++ b/lib/kubernetes-deploy/kubernetes_resource/bugsnag.rb
@@ -12,7 +12,6 @@ def sync
         @secret_found = secrets.split.any? { |s| s.end_with?("-bugsnag") }
       end
       @status = @secret_found ? "Available" : "Unknown"
-      log_status
     end
 
     def deploy_succeeded?
diff --git a/lib/kubernetes-deploy/kubernetes_resource/cloudsql.rb b/lib/kubernetes-deploy/kubernetes_resource/cloudsql.rb
index 213720102..f932f25e6 100644
--- a/lib/kubernetes-deploy/kubernetes_resource/cloudsql.rb
+++ b/lib/kubernetes-deploy/kubernetes_resource/cloudsql.rb
@@ -6,17 +6,17 @@ class Cloudsql < KubernetesResource
     def sync
       _, _err, st = kubectl.run("get", type, @name)
       @found = st.success?
-      @status = if cloudsql_proxy_deployment_exists? && mysql_service_exists?
+      @deployment_exists = cloudsql_proxy_deployment_exists?
+      @service_exists = mysql_service_exists?
+      @status = if @deployment_exists && @service_exists
         "Provisioned"
       else
         "Unknown"
       end
-
-      log_status
     end
 
     def deploy_succeeded?
-      cloudsql_proxy_deployment_exists? && mysql_service_exists?
+      @service_exists && @deployment_exists
     end
 
     def deploy_failed?
diff --git a/lib/kubernetes-deploy/kubernetes_resource/config_map.rb b/lib/kubernetes-deploy/kubernetes_resource/config_map.rb
index df5841bf3..861373b55 100644
--- a/lib/kubernetes-deploy/kubernetes_resource/config_map.rb
+++ b/lib/kubernetes-deploy/kubernetes_resource/config_map.rb
@@ -7,7 +7,6 @@ def sync
       _, _err, st = kubectl.run("get", type, @name)
       @status = st.success? ? "Available" : "Unknown"
       @found = st.success?
-      log_status
     end
 
     def deploy_succeeded?
@@ -18,6 +17,10 @@ def deploy_failed?
       false
     end
 
+    def timeout_message
+      UNUSUAL_FAILURE_MESSAGE
+    end
+
     def exists?
       @found
     end
diff --git a/lib/kubernetes-deploy/kubernetes_resource/deployment.rb b/lib/kubernetes-deploy/kubernetes_resource/deployment.rb
index 9626dc558..762db0669 100644
--- a/lib/kubernetes-deploy/kubernetes_resource/deployment.rb
+++ b/lib/kubernetes-deploy/kubernetes_resource/deployment.rb
@@ -8,12 +8,13 @@ def sync
       @found = st.success?
       @rollout_data = {}
       @status = nil
+      @representative_pod = nil
       @pods = []
       if @found
         @rollout_data = JSON.parse(json_data)["status"]
           .slice("updatedReplicas", "replicas", "availableReplicas", "unavailableReplicas")
-        @status, _err, _ = kubectl.run("rollout", "status", type, @name, "--watch=false") if @deploy_started
+        @status = @rollout_data.map { |replica_state, num| "#{num} #{replica_state}" }.join(", ")
 
         pod_list, _err, st = kubectl.run("get", "pods", "-a", "-l", "name=#{name}", "--output=json")
         if st.success?
@@ -30,12 +31,24 @@ def sync
           )
           pod.deploy_started = @deploy_started
           pod.interpret_json_data(pod_json)
+
+          if !@representative_pod && pod_probably_new?(pod_json)
+            @representative_pod = pod
+          end
           @pods << pod
         end
       end
     end
+  end
 
-    log_status
+  def fetch_logs
+    @representative_pod ? @representative_pod.fetch_logs : {}
+  end
+
+  def fetch_events
+    own_events = super
+    return own_events unless @representative_pod
+    own_events.merge(@representative_pod.fetch_events)
   end
 
   def deploy_succeeded?
@@ -59,8 +72,12 @@ def exists?
     @found
   end
 
-  def status_data
-    super.merge(replicas: @rollout_data, num_pods: @pods.length)
+  private
+
+  def pod_probably_new?(pod_json)
+    return false unless @deploy_started
+    # Shitty, brittle workaround to identify a pod from the new ReplicaSet before implementing ReplicaSet awareness
+    Time.parse(pod_json["metadata"]["creationTimestamp"]) >= (@deploy_started - 30.seconds)
   end
  end
 end
diff --git a/lib/kubernetes-deploy/kubernetes_resource/ingress.rb b/lib/kubernetes-deploy/kubernetes_resource/ingress.rb
index be6d3b5df..790284873 100644
--- a/lib/kubernetes-deploy/kubernetes_resource/ingress.rb
+++ b/lib/kubernetes-deploy/kubernetes_resource/ingress.rb
@@ -7,7 +7,6 @@ def sync
       _, _err, st = kubectl.run("get", type, @name)
       @status = st.success? ? "Created" : "Unknown"
       @found = st.success?
-      log_status
     end
 
     def deploy_succeeded?
@@ -21,9 +20,5 @@ def deploy_failed?
     def exists?
       @found
     end
-
-    def group_name
-      "Ingresses"
-    end
   end
 end
diff --git a/lib/kubernetes-deploy/kubernetes_resource/persistent_volume_claim.rb b/lib/kubernetes-deploy/kubernetes_resource/persistent_volume_claim.rb
index bd1b4321d..22a82643e 100644
--- a/lib/kubernetes-deploy/kubernetes_resource/persistent_volume_claim.rb
+++ b/lib/kubernetes-deploy/kubernetes_resource/persistent_volume_claim.rb
@@ -4,9 +4,9 @@ class PersistentVolumeClaim < KubernetesResource
     TIMEOUT = 5.minutes
 
     def sync
-      @status, _err, st = kubectl.run("get", type, @name, "--output=jsonpath={.status.phase}")
+      out, _err, st = kubectl.run("get", type, @name, "--output=jsonpath={.status.phase}")
       @found = st.success?
-      log_status
+      @status = out if @found
     end
 
     def deploy_succeeded?
diff --git a/lib/kubernetes-deploy/kubernetes_resource/pod.rb b/lib/kubernetes-deploy/kubernetes_resource/pod.rb
index 1e7401605..3ae777d03 100644
--- a/lib/kubernetes-deploy/kubernetes_resource/pod.rb
+++ b/lib/kubernetes-deploy/kubernetes_resource/pod.rb
@@ -2,16 +2,15 @@
 module KubernetesDeploy
   class Pod < KubernetesResource
     TIMEOUT = 10.minutes
-    SUSPICIOUS_CONTAINER_STATES = %w(ImagePullBackOff RunContainerError).freeze
+    SUSPICIOUS_CONTAINER_STATES = %w(ImagePullBackOff RunContainerError ErrImagePull).freeze
 
-    def initialize(name:, namespace:, context:, file:, logger:, parent: nil)
+    def initialize(name:, namespace:, context:, file:, parent: nil, logger:)
       @name = name
       @namespace = namespace
       @context = context
       @file = file
       @parent = parent
       @logger = logger
-      @bare = !@parent
     end
 
     def sync
@@ -24,8 +23,7 @@ def sync
         @ready = false
         @containers = []
       end
-      log_status
-      display_logs if @bare && deploy_finished?
+      display_logs if unmanaged? && deploy_succeeded?
     end
 
     def interpret_json_data(pod_data)
@@ -53,7 +51,7 @@ def interpret_json_data(pod_data)
     end
 
     def deploy_succeeded?
-      if @bare
+      if unmanaged?
         @phase == "Succeeded"
       else
         @phase == "Running" && @ready
@@ -65,34 +63,57 @@ def deploy_failed?
     end
 
     def exists?
-      @bare ? @found : true
+      unmanaged? ? @found : true
     end
 
-    def group_name
-      @bare ? "Bare pods" : @parent
-    end
-
-    private
-
-    def display_logs
-      return {} unless exists? && @containers.present? && !@already_displayed
+    # Returns a hash in the following format:
+    # {
+    #   "pod/web-1/app-container" => "giant blob of logs\nas a single string"
+    #   "pod/web-1/nginx-container" => "another giant blob of logs\nas a single string"
+    # }
+    def fetch_logs
+      return {} unless exists? && @containers.present?
 
-      @containers.each do |container_name|
-        out, _err, st = kubectl.run(
+      @containers.each_with_object({}) do |container_name, container_logs|
+        out, _err, _st = kubectl.run(
           "logs",
           @name,
           "--timestamps=true",
           "--since-time=#{@deploy_started.to_datetime.rfc3339}"
         )
-        next unless st.success? && out.present?
+        container_logs["#{id}/#{container_name}"] = out
+      end
+    end
+
+    private
+
+    def unmanaged?
+      @parent.blank?
+    end
+
+    def display_logs
+      return if @already_displayed
+      container_logs = fetch_logs
+
+      if container_logs.empty?
+        @logger.warn("No logs found for pod #{id}")
+        return
+      end
 
-        @logger.info("Logs from #{id}/#{container_name}:")
-        out.split("\n").each do |line|
-          level = deploy_succeeded? ? :info : :error
-          @logger.public_send(level, "[#{id}/#{container_name}]\t#{line}")
+      container_logs.each do |container_identifier, logs|
+        if logs.blank?
+          @logger.warn("No logs found for #{container_identifier}")
+        else
+          @logger.blank_line
+          @logger.info("Logs from #{container_identifier}:")
+          logs.split("\n").each do |line|
+            @logger.info("[#{container_identifier}]\t#{line}")
+          end
+          @logger.blank_line
         end
-        @already_displayed = true
       end
+
+      @already_displayed = true
     end
   end
 end
diff --git a/lib/kubernetes-deploy/kubernetes_resource/pod_disruption_budget.rb b/lib/kubernetes-deploy/kubernetes_resource/pod_disruption_budget.rb
index a7ba146e6..6753b9976 100644
--- a/lib/kubernetes-deploy/kubernetes_resource/pod_disruption_budget.rb
+++ b/lib/kubernetes-deploy/kubernetes_resource/pod_disruption_budget.rb
@@ -7,7 +7,6 @@ def sync
       _, _err, st = kubectl.run("get", type, @name)
       @found = st.success?
       @status = @found ? "Available" : "Unknown"
-      log_status
     end
 
     def deploy_succeeded?
@@ -19,6 +18,10 @@ def deploy_succeeded?
     def deploy_method
       :replace_force
     end
 
+    def timeout_message
+      UNUSUAL_FAILURE_MESSAGE
+    end
+
     def exists?
       @found
     end
diff --git a/lib/kubernetes-deploy/kubernetes_resource/pod_template.rb b/lib/kubernetes-deploy/kubernetes_resource/pod_template.rb
index eecfe5213..673ab8711 100644
--- a/lib/kubernetes-deploy/kubernetes_resource/pod_template.rb
+++ b/lib/kubernetes-deploy/kubernetes_resource/pod_template.rb
@@ -5,7 +5,6 @@ def sync
       _, _err, st = kubectl.run("get", type, @name)
       @status = st.success? ? "Available" : "Unknown"
       @found = st.success?
-      log_status
     end
 
     def deploy_succeeded?
@@ -16,6 +15,10 @@ def deploy_failed?
       false
     end
 
+    def timeout_message
+      UNUSUAL_FAILURE_MESSAGE
+    end
+
     def exists?
       @found
     end
diff --git a/lib/kubernetes-deploy/kubernetes_resource/redis.rb b/lib/kubernetes-deploy/kubernetes_resource/redis.rb
index 6e58a74c7..ce9c4e05d 100644
--- a/lib/kubernetes-deploy/kubernetes_resource/redis.rb
+++ b/lib/kubernetes-deploy/kubernetes_resource/redis.rb
@@ -7,17 +7,18 @@ class Redis < KubernetesResource
     def sync
       _, _err, st = kubectl.run("get", type, @name)
       @found = st.success?
-      @status = if redis_deployment_exists? && redis_service_exists?
+      @deployment_exists = redis_deployment_exists?
+      @service_exists = redis_service_exists?
+
+      @status = if @deployment_exists && @service_exists
         "Provisioned"
       else
         "Unknown"
       end
-
-      log_status
     end
 
     def deploy_succeeded?
-      redis_deployment_exists? && redis_service_exists?
+      @deployment_exists && @service_exists
     end
 
     def deploy_failed?
diff --git a/lib/kubernetes-deploy/kubernetes_resource/service.rb b/lib/kubernetes-deploy/kubernetes_resource/service.rb
index 0cc9a5037..d64d82f08 100644
--- a/lib/kubernetes-deploy/kubernetes_resource/service.rb
+++ b/lib/kubernetes-deploy/kubernetes_resource/service.rb
@@ -13,7 +13,6 @@ def sync
         @num_endpoints = 0
       end
       @status = "#{@num_endpoints} endpoints"
-      log_status
     end
 
     def deploy_succeeded?
@@ -24,6 +23,13 @@ def deploy_failed?
       false
     end
 
+    def timeout_message
+      <<-MSG.strip_heredoc.strip
+        This service does not have any endpoints. If the related pods are failing, fixing them will solve this as well.
+        If the related pods are up, this service's selector is probably incorrect.
+      MSG
+    end
+
     def exists?
       @found
     end
diff --git a/lib/kubernetes-deploy/resource_watcher.rb b/lib/kubernetes-deploy/resource_watcher.rb
index 094196290..f27325503 100644
--- a/lib/kubernetes-deploy/resource_watcher.rb
+++ b/lib/kubernetes-deploy/resource_watcher.rb
@@ -14,25 +14,36 @@ def initialize(resources, logger:)
     def run(delay_sync: 3.seconds)
       delay_sync_until = Time.now.utc
       started_at = delay_sync_until
-      human_resources = @resources.map(&:id).join(", ")
-      max_wait_time = @resources.map(&:timeout).max
-      @logger.info("Waiting for #{human_resources} with #{max_wait_time}s timeout")
 
       while @resources.present?
         if Time.now.utc < delay_sync_until
           sleep(delay_sync_until - Time.now.utc)
         end
+        watch_time = (Time.now.utc - started_at).round(1)
         delay_sync_until = Time.now.utc + delay_sync # don't pummel the API if the sync is fast
         @resources.each(&:sync)
         newly_finished_resources, @resources = @resources.partition(&:deploy_finished?)
+
+        new_success_list = []
         newly_finished_resources.each do |resource|
-          next unless resource.deploy_failed? || resource.deploy_timed_out?
-          @logger.error("#{resource.id} failed to deploy with status '#{resource.status}'.")
+          if resource.deploy_failed?
+            @logger.error("#{resource.id} failed to deploy after #{watch_time}s")
+          elsif resource.deploy_timed_out?
+            @logger.error("#{resource.id} deployment timed out")
+          else
+            new_success_list << resource.id
+          end
+        end
+
+        unless new_success_list.empty?
+          success_string = ColorizedString.new("Successfully deployed in #{watch_time}s:").green
+          @logger.info("#{success_string} #{new_success_list.join(', ')}")
+        end
-      end
-      watch_time = Time.now.utc - started_at
-      @logger.info("Spent #{watch_time.round(2)}s waiting for #{human_resources}")
+
+        if newly_finished_resources.present? && @resources.present? # something happened this cycle, more to go
+          @logger.info("Continuing to wait for: #{@resources.map(&:id).join(', ')}")
+        end
+      end
     end
   end
 end
diff --git a/lib/kubernetes-deploy/restart_task.rb b/lib/kubernetes-deploy/restart_task.rb
index 23d50665b..295857fb3 100644
--- a/lib/kubernetes-deploy/restart_task.rb
+++ b/lib/kubernetes-deploy/restart_task.rb
@@ -1,11 +1,9 @@
 # frozen_string_literal: true
 require 'kubernetes-deploy/kubeclient_builder'
-require 'kubernetes-deploy/ui_helpers'
 require 'kubernetes-deploy/resource_watcher'
 
 module KubernetesDeploy
   class RestartTask
-    include UIHelpers
     include KubernetesDeploy::KubeclientBuilder
 
     class DeploymentNotFoundError < FatalDeploymentError
@@ -35,6 +33,7 @@ def initialize(context:, namespace:, logger:)
     end
 
     def perform(deployments_names = nil)
+      @logger.reset
       verify_namespace
 
       if deployments_names
@@ -53,10 +52,10 @@ def perform(deployments_names = nil)
         end
       end
 
-      phase_heading("Triggering restart by touching ENV[RESTARTED_AT]")
+      @logger.phase_heading("Triggering restart by touching ENV[RESTARTED_AT]")
       patch_kubeclient_deployments(deployments)
 
-      phase_heading("Waiting for rollout")
+      @logger.phase_heading("Waiting for rollout")
       wait_for_rollout(deployments)
 
       names = deployments.map { |d| "`#{d.metadata.name}`" }
diff --git a/lib/kubernetes-deploy/runner.rb b/lib/kubernetes-deploy/runner.rb
index 2573ab6c6..226b31d63 100644
--- a/lib/kubernetes-deploy/runner.rb
+++ b/lib/kubernetes-deploy/runner.rb
@@ -22,14 +22,12 @@
   require "kubernetes-deploy/kubernetes_resource/#{subresource}"
 end
 require 'kubernetes-deploy/resource_watcher'
-require "kubernetes-deploy/ui_helpers"
 require 'kubernetes-deploy/kubectl'
 require 'kubernetes-deploy/kubeclient_builder'
 require 'kubernetes-deploy/ejson_secret_provisioner'
 
 module KubernetesDeploy
   class Runner
-    include UIHelpers
     include KubeclientBuilder
 
     PREDEPLOY_SEQUENCE = %w(
@@ -80,18 +78,17 @@ def initialize(namespace:, context:, current_sha:, template_dir:, logger:, bindi
     end
 
     def run(verify_result: true, allow_protected_ns: false, prune: true)
-      phase_heading("Validating configuration")
-      validate_configuration(allow_protected_ns: allow_protected_ns, prune: prune)
+      @logger.reset
 
-      phase_heading("Identifying deployment target")
+      @logger.phase_heading("Initializing deploy")
+      validate_configuration(allow_protected_ns: allow_protected_ns, prune: prune)
       confirm_context_exists
       confirm_namespace_exists
-
-      phase_heading("Parsing deploy content")
       resources = discover_resources
 
-      phase_heading("Checking initial resource statuses")
+      @logger.phase_heading("Checking initial resource statuses")
       resources.each(&:sync)
+      resources.each { |r| @logger.info(r.pretty_status) }
 
       ejson = EjsonSecretProvisioner.new(
         namespace: @namespace,
         context: @context,
         logger: @logger
       )
       if ejson.secret_changes_required?
- phase_heading("Deploying kubernetes secrets from #{EjsonSecretProvisioner::EJSON_SECRETS_FILE}") + @logger.phase_heading("Deploying kubernetes secrets from #{EjsonSecretProvisioner::EJSON_SECRETS_FILE}") ejson.run end - phase_heading("Predeploying priority resources") - predeploy_priority_resources(resources) + if deploy_has_priority_resources?(resources) + @logger.phase_heading("Predeploying priority resources") + predeploy_priority_resources(resources) + end - phase_heading("Deploying all resources") + @logger.phase_heading("Deploying all resources") if PROTECTED_NAMESPACES.include?(@namespace) && prune raise FatalDeploymentError, "Refusing to deploy to protected namespace '#{@namespace}' with pruning enabled" end @@ -116,12 +115,14 @@ def run(verify_result: true, allow_protected_ns: false, prune: true) return true unless verify_result wait_for_completion(resources) - report_final_status(resources) - - resources.all?(&:deploy_succeeded?) + record_statuses(resources) + success = resources.all?(&:deploy_succeeded?) rescue FatalDeploymentError => error - @logger.fatal "#{error.class}: #{error.message}" - false + @logger.summary.add_action(error.message) + success = false + ensure + @logger.print_summary(success) + success end def template_variables @@ -133,6 +134,23 @@ def template_variables private + def record_statuses(resources) + successful_resources, failed_resources = resources.partition(&:deploy_succeeded?) + fail_count = failed_resources.length + success_count = successful_resources.length + + if success_count > 0 + @logger.summary.add_action("successfully deployed #{success_count} #{'resource'.pluralize(success_count)}") + final_statuses = successful_resources.map(&:pretty_status).join("\n") + @logger.summary.add_paragraph("#{ColorizedString.new('Successful resources').green}\n#{final_statuses}") + end + + if fail_count > 0 + @logger.summary.add_action("failed to deploy #{fail_count} #{'resource'.pluralize(fail_count)}") + failed_resources.each { |r| @logger.summary.add_paragraph(r.debug_message) } + end + end + def versioned_prune_whitelist if server_major_version == "1.5" BASE_PRUNE_WHITELIST + PRUNE_WHITELIST_V_1_5 @@ -152,7 +170,7 @@ def server_major_version # Inspect the file referenced in the kubectl stderr # to make it easier for developer to understand what's going on - def inspect_kubectl_out_for_files(stderr) + def find_bad_file_from_kubectl_output(stderr) # Output example: # Error from server (BadRequest): error when creating "/path/to/configmap-gqq5oh.yml20170411-33615-t0t3m": match = stderr.match(%r{BadRequest.*"(?\/\S+\.yml\S+)"}) @@ -161,12 +179,12 @@ def inspect_kubectl_out_for_files(stderr) path = match[:path] if path.present? && File.file?(path) suspicious_file = File.read(path) - @logger.warn("Inspecting the file mentioned in the error message (#{path})") - @logger.warn(suspicious_file) - else - @logger.warn("Detected a file (#{path.inspect}) referenced in the kubectl stderr " \ - "but was unable to inspect it") end + [File.basename(path, ".*"), suspicious_file] + end + + def deploy_has_priority_resources?(resources) + resources.any? { |r| PREDEPLOY_SEQUENCE.include?(r.type) } end def predeploy_priority_resources(resource_list) @@ -175,15 +193,20 @@ def predeploy_priority_resources(resource_list) next if matching_resources.empty? deploy_resources(matching_resources) wait_for_completion(matching_resources) - fail_list = matching_resources.select { |r| r.deploy_failed? || r.deploy_timed_out? }.map(&:id) - unless fail_list.empty? 
-        raise FatalDeploymentError, "The following priority resources failed to deploy: #{fail_list.join(', ')}"
+
+      failed_resources = matching_resources.reject(&:deploy_succeeded?)
+      fail_count = failed_resources.length
+      if fail_count > 0
+        failed_resources.each { |r| @logger.summary.add_paragraph(r.debug_message) }
+        raise FatalDeploymentError, "Failed to deploy #{fail_count} priority #{'resource'.pluralize(fail_count)}"
       end
+      @logger.blank_line
     end
   end
 
   def discover_resources
     resources = []
+    @logger.info("Discovering templates:")
     Dir.foreach(@template_dir) do |filename|
       next unless filename.end_with?(".yml.erb", ".yml")
@@ -192,15 +215,30 @@ def discover_resources
         type, name = resource_id.split("/", 2) # e.g. "pod/web-198612918-dzvfb"
         resources << KubernetesResource.for_type(type: type, name: name, namespace: @namespace, context: @context,
           file: tempfile, logger: @logger)
-        @logger.info "Discovered template for #{resource_id}"
+        @logger.info "  - #{resource_id}"
       end
     end
     resources
   end
 
   def discover_resource_via_dry_run(tempfile)
-    resource_id, _err, st = kubectl.run("create", "-f", tempfile.path, "--dry-run", "--output=name")
-    raise FatalDeploymentError, "Dry run failed for template #{File.basename(tempfile.path)}." unless st.success?
+    command = ["create", "-f", tempfile.path, "--dry-run", "--output=name"]
+    resource_id, err, st = kubectl.run(*command, log_failure: false)
+
+    unless st.success?
+      debug_msg = <<-DEBUG_MSG.strip_heredoc
+        This usually means template '#{File.basename(tempfile.path, '.*')}' is not a valid Kubernetes template.
+
+        Error from kubectl:
+        #{err}
+
+        Rendered template content:
+      DEBUG_MSG
+      debug_msg += File.read(tempfile.path)
+      @logger.summary.add_paragraph(debug_msg)
+
+      raise FatalDeploymentError, "Kubectl dry run failed (command: #{Shellwords.join(command)})"
+    end
     resource_id
   end
@@ -216,17 +254,38 @@ def split_templates(filename)
       yield f
     end
   rescue Psych::SyntaxError => e
-    @logger.error(rendered_content)
-    raise FatalDeploymentError, "Template #{filename} cannot be parsed: #{e.message}"
+    debug_msg = <<-INFO.strip_heredoc
+      Error message: #{e}
+
+      Template content:
+      ---
+    INFO
+    debug_msg += rendered_content
+    @logger.summary.add_paragraph(debug_msg)
+    raise FatalDeploymentError, "Template '#{filename}' cannot be parsed"
   end
 
-  def report_final_status(resources)
-    if resources.all?(&:deploy_succeeded?)
-      @logger.info("Deploy succeeded!")
+  def record_apply_failure(err)
+    file_name, file_content = find_bad_file_from_kubectl_output(err)
+    if file_name
+      debug_msg = <<-HELPFUL_MESSAGE.strip_heredoc
+        This usually means your template named '#{file_name}' is invalid.
+
+        Error from kubectl:
+        #{err}
+
+        Rendered template content:
+      HELPFUL_MESSAGE
+      debug_msg += file_content || "Failed to read file"
     else
-      fail_list = resources.select { |r| r.deploy_failed? || r.deploy_timed_out? }.map(&:id)
-      raise FatalDeploymentError, "The following resources failed to deploy: #{fail_list.join(', ')}"
+      debug_msg = <<-FALLBACK_MSG
+        This usually means one of your templates is invalid, but we were unable to automatically identify which one.
+        Please inspect the error message from kubectl:
+        #{err}
+      FALLBACK_MSG
     end
+
+    @logger.summary.add_paragraph(debug_msg)
   end
 
   def wait_for_completion(watched_resources)
@@ -243,6 +302,9 @@ def render_template(filename, raw_template)
       erb_binding.local_variable_set(var_name, value)
     end
     erb_template.result(erb_binding)
+  rescue NameError => e
+    @logger.summary.add_paragraph("Error from renderer:\n  #{e.message.tr("\n", ' ')}")
+    raise FatalDeploymentError, "Template '#{filename}' cannot be rendered"
   end
 
   def validate_configuration(allow_protected_ns:, prune:)
@@ -267,12 +329,10 @@ def validate_configuration(allow_protected_ns:, prune:)
       if allow_protected_ns && prune
         errors << "Refusing to deploy to protected namespace '#{@namespace}' with pruning enabled"
       elsif allow_protected_ns
-        warning = <<-WARNING.strip_heredoc
-          You're deploying to protected namespace #{@namespace}, which cannot be pruned.
-          Existing resources can only be removed manually with kubectl. Removing templates from the set deployed will have no effect.
-          ***Please do not deploy to #{@namespace} unless you really know what you are doing.***
-        WARNING
-        @logger.warn(warning)
+        @logger.warn("You're deploying to protected namespace #{@namespace}, which cannot be pruned.")
+        @logger.warn("Existing resources can only be removed manually with kubectl. " \
+          "Removing templates from the set deployed will have no effect.")
+        @logger.warn("***Please do not deploy to #{@namespace} unless you really know what you are doing.***")
       else
         errors << "Refusing to deploy to protected namespace '#{@namespace}'"
       end
@@ -282,7 +342,11 @@ def validate_configuration(allow_protected_ns:, prune:)
       errors << "Context must be specified"
     end
 
-    raise FatalDeploymentError, "Configuration invalid: #{errors.join(', ')}" unless errors.empty?
+    unless errors.empty?
+      @logger.summary.add_paragraph(errors.map { |err| "- #{err}" }.join("\n"))
+      raise FatalDeploymentError, "Configuration invalid"
+    end
+
     @logger.info("All required parameters and files are present")
   end
@@ -324,7 +388,7 @@ def apply_all(resources, prune)
     command = ["apply"]
     resources.each do |r|
-      @logger.info("- #{r.id}")
+      @logger.info("- #{r.id} (timeout: #{r.timeout}s)")
       command.push("-f", r.file.path)
       r.deploy_started = Time.now.utc
     end
@@ -334,18 +398,26 @@ def apply_all(resources, prune)
       versioned_prune_whitelist.each { |type| command.push("--prune-whitelist=#{type}") }
     end
 
-    _, err, st = kubectl.run(*command)
-    unless st.success?
-      inspect_kubectl_out_for_files(err)
-      raise FatalDeploymentError, <<-MSG
-"The following command failed: #{Shellwords.join(command)}"
-#{err}
-MSG
+    out, err, st = kubectl.run(*command, log_failure: false)
+    if st.success?
+      log_pruning(out) if prune
+    else
+      record_apply_failure(err)
+      raise FatalDeploymentError, "Command failed: #{Shellwords.join(command)}"
     end
   end
 
+  def log_pruning(kubectl_output)
+    pruned = kubectl_output.scan(/^(.*) pruned$/)
+    return unless pruned.present?
+
+    @logger.info("The following resources were pruned: #{pruned.join(', ')}")
+    @logger.summary.add_action("pruned #{pruned.length} resources")
+  end
+
   def confirm_context_exists
-    out, err, st = kubectl.run("config", "get-contexts", "-o", "name", use_namespace: false, use_context: false)
+    out, err, st = kubectl.run("config", "get-contexts", "-o", "name",
+      use_namespace: false, use_context: false, log_failure: false)
     available_contexts = out.split("\n")
     if !st.success?
      raise FatalDeploymentError, err
diff --git a/lib/kubernetes-deploy/runner_task.rb b/lib/kubernetes-deploy/runner_task.rb
index 166b905e4..e2e13efae 100644
--- a/lib/kubernetes-deploy/runner_task.rb
+++ b/lib/kubernetes-deploy/runner_task.rb
@@ -2,13 +2,11 @@
 require 'tempfile'
 
 require 'kubernetes-deploy/kubeclient_builder'
-require 'kubernetes-deploy/ui_helpers'
 require 'kubernetes-deploy/kubectl'
 
 module KubernetesDeploy
   class RunnerTask
     include KubeclientBuilder
-    include UIHelpers
 
     class FatalTaskRunError < FatalDeploymentError; end
     class TaskTemplateMissingError < FatalDeploymentError
@@ -33,18 +31,19 @@ def run(*args)
     end
 
     def run!(task_template:, entrypoint:, args:, env_vars: [])
-      phase_heading("Validating configuration")
+      @logger.reset
+      @logger.phase_heading("Validating configuration")
       validate_configuration(task_template, args)
 
-      phase_heading("Fetching task template")
+      @logger.phase_heading("Fetching task template")
       raw_template = get_template(task_template)
 
-      phase_heading("Constructing final pod specification")
+      @logger.phase_heading("Constructing final pod specification")
       rendered_template = build_pod_template(raw_template, entrypoint, args, env_vars)
       validate_pod_spec(rendered_template)
 
-      phase_heading("Creating pod")
+      @logger.phase_heading("Creating pod")
       @logger.info("Starting task runner pod: '#{rendered_template.metadata.name}'")
       @kubeclient.create_pod(rendered_template)
     end
diff --git a/lib/kubernetes-deploy/ui_helpers.rb b/lib/kubernetes-deploy/ui_helpers.rb
deleted file mode 100644
index e1f7788cc..000000000
--- a/lib/kubernetes-deploy/ui_helpers.rb
+++ /dev/null
@@ -1,15 +0,0 @@
-# frozen_string_literal: true
-module KubernetesDeploy
-  module UIHelpers
-    private
-
-    def phase_heading(phase_name)
-      @current_phase ||= 0
-      @current_phase += 1
-      heading = "Phase #{@current_phase}: #{phase_name}"
-      padding = (100.0 - heading.length) / 2
-      @logger.info("")
-      @logger.info("#{'-' * padding.floor}#{heading}#{'-' * padding.ceil}")
-    end
-  end
-end
diff --git a/test/integration/kubernetes_deploy_test.rb b/test/integration/kubernetes_deploy_test.rb
index 967f497aa..ab5cf8253 100644
--- a/test/integration/kubernetes_deploy_test.rb
+++ b/test/integration/kubernetes_deploy_test.rb
@@ -20,7 +20,7 @@ def test_partial_deploy_followed_by_full_deploy
     hello_cloud.assert_all_up
   end
 
-  def test_pruning
+  def test_pruning_works
     assert deploy_fixtures("hello-cloud")
     hello_cloud = FixtureSetAssertions::HelloCloud.new(@namespace)
     hello_cloud.assert_all_up
@@ -30,6 +30,18 @@
     hello_cloud.refute_configmap_data_exists
     hello_cloud.refute_managed_pod_exists
     hello_cloud.refute_web_resources_exist
+
+    expected_pruned = [
+      'configmap "hello-cloud-configmap-data"',
+      'pod "unmanaged-pod-',
+      'service "web"',
+      'deployment "web"',
+      'ingress "web"'
+    ] # not necessarily listed in this order
+    assert_logs_match(/Pruned 5 resources and successfully deployed 3 resources/)
+    expected_pruned.each do |resource|
+      assert_logs_match(/The following resources were pruned:.*#{resource}/)
+    end
   end
 
   def test_pruning_disabled
@@ -99,7 +111,8 @@ def test_success_with_unrecognized_resource_type
 
   def test_invalid_yaml_fails_fast
     refute deploy_dir(fixture_path("invalid"))
-    assert_logs_match(/Template yaml-error.yml cannot be parsed/)
+    assert_logs_match(/Template 'yaml-error.yml' cannot be parsed/)
+    assert_logs_match(/datapoint1: value1:/)
   end
 
   def test_invalid_k8s_spec_that_is_valid_yaml_fails_fast
@@ -109,7 +122,7 @@ def test_invalid_k8s_spec_that_is_valid_yaml_fails_fast
     end
success, "Deploy succeeded when it was expected to fail" - assert_logs_match(/Dry run failed for template configmap-data/) + assert_logs_match(/'configmap-data' is not a valid Kubernetes template/) assert_logs_match(/error validating data\: found invalid field myKey for v1.ObjectMeta/) end @@ -132,9 +145,10 @@ def test_invalid_k8s_spec_that_is_valid_yaml_fails_on_apply end assert_equal false, success, "Deploy succeeded when it was expected to fail" - assert_logs_match(/The following command failed: apply/) + assert_logs_match(/Command failed: apply -f/) assert_logs_match(/Error from server \(BadRequest\): error when creating/) - assert_logs_match(/Inspecting the file mentioned in the error message/) + assert_logs_match(/Rendered template content:/) + assert_logs_match(/not_a_name:/) end def test_dead_pods_in_old_replicaset_are_ignored @@ -167,7 +181,8 @@ def test_bad_container_image_on_run_once_halts_and_fails_deploy pod["spec"]["containers"].first["image"] = "hello-world:thisImageIsBad" end assert_equal false, success, "Deploy succeeded when it was expected to fail" - assert_logs_match(%r{The following priority resources failed to deploy: Pod\/unmanaged-pod}) + assert_logs_match("Failed to deploy 1 priority resource") + assert_logs_match(%r{Pod\/unmanaged-pod-\w+-\w+: FAILED}) hello_cloud = FixtureSetAssertions::HelloCloud.new(@namespace) hello_cloud.assert_unmanaged_pod_statuses("Failed") @@ -183,7 +198,8 @@ def test_wait_false_still_waits_for_priority_resources pod["spec"]["containers"].first["image"] = "hello-world:thisImageIsBad" end assert_equal false, success, "Deploy succeeded when it was expected to fail" - assert_logs_match(%r{The following priority resources failed to deploy: Pod\/unmanaged-pod}) + assert_logs_match("Failed to deploy 1 priority resource") + assert_logs_match(%r{Pod\/unmanaged-pod-\w+-\w+: FAILED}) assert_logs_match(/DeadlineExceeded/) end @@ -205,10 +221,9 @@ def test_extra_bindings_should_be_rendered assert_equal 'binding_test_b', map['BINDING_TEST_B'] end - def test_should_raise_if_required_binding_not_present - assert_raises NameError do - deploy_fixtures('collection-with-erb', subset: ["conf_map.yml.erb"]) - end + def test_deploy_fails_if_required_binding_not_present + refute deploy_fixtures('collection-with-erb', subset: ["conf_map.yml.erb"]) + assert_logs_match("Template 'conf_map.yml.erb' cannot be rendered") end def test_long_running_deployment @@ -250,7 +265,7 @@ def test_create_ejson_secrets_with_malformed_secret_data fixtures["secrets.ejson"]["kubernetes_secrets"]["monitoring-token"]["data"] = malformed end assert_equal false, success, "Deploy succeeded when it was expected to fail" - assert_logs_match(/Data for secret monitoring-token was invalid/) + assert_logs_match(/data for secret monitoring-token was invalid/) end def test_pruning_of_secrets_created_from_ejson @@ -294,6 +309,78 @@ def test_pruning_of_existing_managed_secrets_when_ejson_file_has_been_deleted ejson_cloud.refute_resource_exists('secret', 'monitoring-token') end + def test_deploy_result_logging_for_mixed_result_deploy + KubernetesDeploy::Pod.any_instance.stubs(:deploy_failed?).returns(false, false, false, false, true) + service_timeout = 5 + KubernetesDeploy::Service.any_instance.stubs(:timeout).returns(service_timeout) + + success = deploy_fixtures("hello-cloud", subset: ["web.yml.erb", "configmap-data.yml"]) do |fixtures| + web = fixtures["web.yml.erb"]["Deployment"].first + container = web["spec"]["template"]["spec"]["containers"].first + container["command"] = 
["/usr/sbin/nginx", "-s", "stop"] # it isn't running, so this will log some errors + end + assert_equal false, success, "Deploy succeeded when it was expected to fail" + + # List of successful resources + assert_logs_match(%r{ConfigMap/hello-cloud-configmap-data\s+Available}) + assert_logs_match(%r{Ingress/web\s+Created}) + + # Debug info for service timeout + assert_logs_match("Service/web: TIMED OUT (limit: #{service_timeout}s)") + assert_logs_match("service does not have any endpoints") + assert_logs_match("Final status: 0 endpoints") + + # Debug info for deployment failure + assert_logs_match("Deployment/web: FAILED") + assert_logs_match("Final status: 1 updatedReplicas, 1 replicas, 1 unavailableReplicas") + assert_logs_match(%r{\[Deployment/web\].*Scaled up replica set web-}) # deployment event + assert_logs_match(%r{\[Pod/web-.*CrashLoopBackOff.*restarting failed container=app pod=web-}) # pod event + assert_logs_match("nginx: [error]") # pod log + end + + def test_failed_deploy_to_nonexistent_namespace + original_ns = @namespace + @namespace = 'this-certainly-should-not-exist' + refute deploy_fixtures("hello-cloud", subset: ['configmap-data.yml']) + assert_logs_match(/Result: FAILURE.*Namespace this-certainly-should-not-exist not found/m) + ensure + @namespace = original_ns + end + + def test_failure_logs_from_unmanaged_pod_appear_in_summary_section + success = deploy_fixtures("hello-cloud", subset: ["configmap-data.yml", "unmanaged-pod.yml.erb"]) do |fixtures| + pod = fixtures["unmanaged-pod.yml.erb"]["Pod"].first + container = pod["spec"]["containers"].first + container["command"] = ["/some/bad/path"] # should throw an error + end + assert_equal false, success + + assert_logs_match("Failed to deploy 1 priority resource") + assert_logs_match("Container command '/some/bad/path' not found or does not exist") # from an event + assert_logs_match(/Result.*no such file or directory/m) # from logs + refute_logs_match(/no such file or directory.*Result/m) # logs not also displayed before summary + end + + def test_unusual_timeout_output + KubernetesDeploy::ConfigMap.any_instance.stubs(:deploy_succeeded?).returns(false) + KubernetesDeploy::ConfigMap.any_instance.stubs(:timeout).returns(2) + refute deploy_fixtures('hello-cloud', subset: ["configmap-data.yml"]) + assert_logs_match("It is very unusual for this resource type to fail to deploy. 
+    assert_logs_match("Final status: Available")
+  end
+
+  # ref https://github.com/kubernetes/kubernetes/issues/26202
+  def test_output_when_switching_labels_incorrectly
+    assert deploy_fixtures("hello-cloud", subset: ["configmap-data.yml", "web.yml.erb"])
+    success = deploy_fixtures("hello-cloud", subset: ["configmap-data.yml", "web.yml.erb"]) do |fixtures|
+      web = fixtures["web.yml.erb"]["Deployment"].first
+      web["spec"]["template"]["metadata"]["labels"] = { "name" => "foobar" }
+    end
+    assert_equal false, success
+    assert_logs_match("one of your templates is invalid")
+    assert_logs_match(/The Deployment "web" is invalid.*`selector` does not match template `labels`/)
+  end
+
   private
 
   def count_by_revisions(pods)
diff --git a/test/test_helper.rb b/test/test_helper.rb
index 031701c82..220040145 100644
--- a/test/test_helper.rb
+++ b/test/test_helper.rb
@@ -19,8 +19,20 @@ module KubernetesDeploy
   class TestCase < ::Minitest::Test
     def setup
-      ColorizedString.disable_colorization = true unless ENV["PRINT_LOGS"]
       @logger_stream = StringIO.new
+
+      if ENV["PRINT_LOGS"]
+        # Allows you to view the integration test output as a series of tophat scenarios
+        <<-MESSAGE.strip_heredoc.each_line { |l| $stderr.puts l }
+
+          \033[0;35m***************************************************************************
+          Begin test: #{name}
+          ***************************************************************************\033[0m
+
+        MESSAGE
+      else
+        ColorizedString.disable_colorization = true
+      end
     end
 
     def logger
diff --git a/test/unit/kubernetes-deploy/resource_watcher_test.rb b/test/unit/kubernetes-deploy/resource_watcher_test.rb
index ee240c53d..921d01a52 100644
--- a/test/unit/kubernetes-deploy/resource_watcher_test.rb
+++ b/test/unit/kubernetes-deploy/resource_watcher_test.rb
@@ -17,8 +17,7 @@ def test_success_with_mock_resource
     watcher = KubernetesDeploy::ResourceWatcher.new([resource], logger: logger)
     watcher.run(delay_sync: 0.1)
 
-    assert_logs_match(/Waiting for web-pod with 1s timeout/)
-    assert_logs_match(/Spent (\S+)s waiting for web-pod/)
+    assert_logs_match(/Successfully deployed in \d.\ds: web-pod/)
   end
 
   def test_failure_with_mock_resource
@@ -27,9 +26,7 @@ def test_failure_with_mock_resource
     watcher = KubernetesDeploy::ResourceWatcher.new([resource], logger: logger)
     watcher.run(delay_sync: 0.1)
 
-    assert_logs_match(/Waiting for web-pod with 1s timeout/)
-    assert_logs_match(/web-pod failed to deploy with status 'failed'/)
-    assert_logs_match(/Spent (\S+)s waiting for web-pod/)
+    assert_logs_match(/web-pod failed to deploy after \d\.\ds/)
   end
 
   def test_timeout_from_resource
@@ -38,9 +35,7 @@ def test_timeout_from_resource
     watcher = KubernetesDeploy::ResourceWatcher.new([resource], logger: logger)
     watcher.run(delay_sync: 0.1)
 
-    assert_logs_match(/Waiting for web-pod with 1s timeout/)
-    assert_logs_match(/web-pod failed to deploy with status 'timeout'/)
-    assert_logs_match(/Spent (\S+)s waiting for web-pod/)
+    assert_logs_match(/web-pod deployment timed out/)
   end
 
   def test_wait_logging_when_resources_do_not_finish_together
@@ -52,10 +47,16 @@ def test_wait_logging_when_resources_do_not_finish_together
     watcher = KubernetesDeploy::ResourceWatcher.new([first, second, third, fourth], logger: logger)
     watcher.run(delay_sync: 0.1)
 
-    assert_logs_match(/Waiting for first, second, third, fourth with 4s timeout/)
-    assert_logs_match(/second failed to deploy with status 'timeout'/)
-    assert_logs_match(/third failed to deploy with status 'failed'/)
-    assert_logs_match(/Spent (\S+)s waiting for first, second, third, fourth/)
+    @logger_stream.rewind
+    scanner = StringScanner.new(@logger_stream.read)
+
+    assert scanner.scan_until(/Successfully deployed in \d.\ds: first/)
+    assert scanner.scan_until(/Continuing to wait for: second, third, fourth/)
+    assert scanner.scan_until(/second deployment timed out/)
+    assert scanner.scan_until(/Continuing to wait for: third, fourth/)
+    assert scanner.scan_until(/third failed to deploy after \d.\ds/)
+    assert scanner.scan_until(/Continuing to wait for: fourth/)
+    assert scanner.scan_until(/Successfully deployed in \d.\ds: fourth/)
   end
 
   private