# Fluentd configuration for shipping Kubernetes logs to Google Cloud
# (Stackdriver) Logging. Container logs are tailed from
# /var/log/containers, reformed into 'k8s_container' monitored-resource
# entries, and sent through one google_cloud output; everything else is
# treated as 'k8s_node' logs and sent through a second, smaller output.
#
# NOTE(review): the original file had all angle-bracket directives
# (<source>, <filter>, <match>, <record>, regex capture groups) stripped
# by a sanitizer. The structure and the truncated regexes below were
# reconstructed from the upstream kubernetes fluentd-gcp addon configmap
# this file evidently derives from — confirm against your deployed version.

# Tail JSON-per-line container logs written by the container runtime.
<source>
  @type tail
  path /var/log/containers/*.log
  pos_file /var/log/k8s-gcp-containers.log.pos
  tag reform.*
  read_from_head true
  <parse>
    @type multi_format
    # Docker json-file format: one JSON object per line with a 'time' key.
    <pattern>
      format json
      time_key time
      time_format %Y-%m-%dT%H:%M:%S.%NZ
    </pattern>
    # CRI format: "<timestamp> <stream> <flags> <log>".
    # NOTE(review): capture-group names reconstructed — verify.
    <pattern>
      format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
      time_format %Y-%m-%dT%H:%M:%S.%N%:z
    </pattern>
  </parse>
</source>

# Parse glog-style lines (e.g. "I0204 07:32:30.020537 3368 server.go:10] msg")
# to extract severity/time/pid/source from the 'log' field when present.
# NOTE(review): this filter's body was eaten by the tag stripping; regex and
# options reconstructed from the upstream addon — verify.
<filter reform.**>
  @type parser
  format /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
  reserve_data true
  suppress_parse_error_log true
  key_name log
</filter>

<match reform.**>
  @type record_reformer
  enable_ruby true
  <record>
    # Extract local_resource_id from tag for 'k8s_container' monitored
    # resource. The format is:
    # 'k8s_container.<namespace_name>.<pod_name>.<container_name>'.
    "logging.googleapis.com/local_resource_id" ${"k8s_container.#{tag_suffix[4].rpartition('.')[0].split('_')[1]}.#{tag_suffix[4].rpartition('.')[0].split('_')[0]}.#{tag_suffix[4].rpartition('.')[0].split('_')[2].rpartition('-')[0]}"}
    # Rename the field 'log' to a more generic field 'message'. This way the
    # fluent-plugin-google-cloud knows to flatten the field as textPayload
    # instead of jsonPayload after extracting 'time', 'severity' and
    # 'stream' from the record.
    message ${record['log']}
    # If 'severity' is not set, assume stderr is ERROR and stdout is INFO.
    severity ${record['severity'] || if record['stream'] == 'stderr' then 'ERROR' else 'INFO' end}
  </record>
  tag ${if record['stream'] == 'stderr' then 'raw.stderr' else 'raw.stdout' end}
  remove_keys stream,log
</match>

# Trim oversized messages so entries stay within the Logging API's
# per-entry size limit instead of being dropped.
<filter {raw.stderr,raw.stdout}>
  @type record_transformer
  enable_ruby true
  <record>
    message ${record['message'].length > 100000 ? "[Trimmed]#{record['message'][0..100000]}..." : record['message']}
  </record>
</filter>

# Do not collect fluentd's own logs to avoid infinite loops.
<match fluent.**>
  @type null
</match>

# This section is exclusive for k8s_container logs. These logs come with
# 'stderr'/'stdout' tags.
# We use a separate output stanza for 'k8s_node' logs with a smaller buffer
# because node logs are less important than user's container logs.
<match {raw.stderr,raw.stdout}>
  @type google_cloud
  # Try to detect JSON formatted log entries.
  detect_json true
  # Collect metrics in Prometheus registry about plugin activity.
  enable_monitoring true
  monitoring_type prometheus
  # Allow log entries from multiple containers to be sent in the same request.
  split_logs_by_tag false
  # Set the buffer type to file to improve the reliability and reduce the
  # memory consumption.
  buffer_type file
  buffer_path /var/log/k8s-fluentd-buffers/kubernetes.containers.buffer
  # Set queue_full action to block because we want to pause gracefully
  # in case of the off-the-limits load instead of throwing an exception.
  buffer_queue_full_action block
  # Set the chunk limit conservatively to avoid exceeding the recommended
  # chunk size of 5MB per write request.
  buffer_chunk_limit 512k
  # Cap the combined memory usage of this buffer and the one below to
  # 1MiB/chunk * (6 + 2) chunks = 8 MiB
  buffer_queue_limit 6
  # Never wait more than 5 seconds before flushing logs in the non-error case.
  flush_interval 5s
  # Never wait longer than 30 seconds between retries.
  max_retry_wait 30
  # Disable the limit on the number of retries (retry forever).
  disable_retry_limit
  # Use multiple threads for processing.
  num_threads 2
  use_grpc true
  k8s_cluster_name "#{ENV["CLUSTER_NAME"]}"
  k8s_cluster_location "#{ENV["CLUSTER_LOCATION"]}"
  adjust_invalid_timestamps false
</match>

# Attach local_resource_id for 'k8s_node' monitored resource.
<filter **>
  @type record_transformer
  enable_ruby true
  <record>
    "logging.googleapis.com/local_resource_id" ${"k8s_node.#{ENV['NODE_NAME']}"}
  </record>
</filter>

# This section is exclusive for 'k8s_node' logs. These logs come with tags
# that are neither 'stderr' or 'stdout'.
# We use a separate output stanza for 'k8s_container' logs with a larger
# buffer because container logs are more important than node logs.
<match **>
  @type google_cloud
  detect_json true
  enable_monitoring true
  monitoring_type prometheus
  # Allow entries from multiple system logs to be sent in the same request.
  split_logs_by_tag false
  detect_subservice false
  buffer_type file
  buffer_path /var/log/k8s-fluentd-buffers/kubernetes.system.buffer
  buffer_queue_full_action block
  buffer_chunk_limit 512k
  buffer_queue_limit 2
  flush_interval 5s
  max_retry_wait 30
  disable_retry_limit
  num_threads 2
  use_grpc true
  k8s_cluster_name "#{ENV["CLUSTER_NAME"]}"
  k8s_cluster_location "#{ENV["CLUSTER_LOCATION"]}"
  adjust_invalid_timestamps false
</match>