diff --git a/filebeat/etc/beat.yml b/filebeat/etc/beat.yml index 6aad948c5ca..a226fc23d4b 100644 --- a/filebeat/etc/beat.yml +++ b/filebeat/etc/beat.yml @@ -1,192 +1,189 @@ -################### Filebeat Configuration Example ######################### +###################### Filebeat Configuration Example ######################### -######################### Filebeat prospectors ############################# +#=========================== Filebeat prospectors ============================= # List of prospectors to fetch data. filebeat.prospectors: - # Each - is a prospector. Below are the prospector specific configurations - - # Type of the files. Based on this the way the file is read is decided. - # The different types cannot be mixed in one prospector - # - # Possible options are: - # * log: Reads every line of the log file (default) - # * stdin: Reads the standard in - - input_type: log - - # Paths that should be crawled and fetched. Glob based paths. - # To fetch all ".log" files from a specific level of subdirectories - # /var/log/*/*.log can be used. - # For each file found under this path, a harvester is started. - # Make sure not file is defined twice as this can lead to unexpected behaviour. - paths: - - /var/log/*.log - #- c:\programdata\elasticsearch\logs\* - - # Configure the file encoding for reading files with international characters - # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding). - # Some sample encodings: - # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk, - # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ... - #encoding: plain - - # Decode JSON options. Enable this if your logs are structured in JSON. - # JSON key on which to apply the line filtering and multiline settings. This key - # must be top level and its value must be string, otherwise it is ignored. If - # no text key is defined, the line filtering and multiline features cannot be used. - #json.message_key: - - # By default, the decoded JSON is placed under a "json" key in the output document. - # If you enable this setting, the keys are copied top level in the output document. - #json.keys_under_root: false - - # If keys_under_root and this setting are enabled, then the values from the decoded - # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) - # in case of conflicts. - #json.overwrite_keys: false - - # If this setting is enabled, Filebeat adds a "json_error" key in case of JSON - # unmarshaling errors or when a text key is defined in the configuration but cannot - # be used. - #json.add_error_key: false - - # Exclude lines. A list of regular expressions to match. It drops the lines that are - # matching any regular expression from the list. The include_lines is called before - # exclude_lines. By default, no lines are dropped. - #exclude_lines: ["^DBG"] - - # Include lines. A list of regular expressions to match. It exports the lines that are - # matching any regular expression from the list. The include_lines is called before - # exclude_lines. By default, all the lines are exported. - #include_lines: ["^ERR", "^WARN"] - - # Exclude files. A list of regular expressions to match. Filebeat drops the files that - # are matching any regular expression from the list. By default, no files are dropped. - #exclude_files: [".gz$"] - - # Optional additional fields. 
These field can be freely picked - # to add additional information to the crawled log files for filtering - #fields: - # level: debug - # review: 1 - - # Set to true to store the additional fields as top level fields instead - # of under the "fields" sub-dictionary. In case of name conflicts with the - # fields added by Filebeat itself, the custom fields overwrite the default - # fields. - #fields_under_root: false - - # Ignore files which were modified more then the defined timespan in the past. - # ignore_older is disabled by default, so no files are ignored by setting it to 0. - # Time strings like 2h (2 hours), 5m (5 minutes) can be used. - #ignore_older: 0 - - # Close older closes the file handler for which were not modified - # for longer then close_older - # Time strings like 2h (2 hours), 5m (5 minutes) can be used. - #close_older: 1h - - # Type to be published in the 'type' field. For Elasticsearch output, - # the type defines the document type these entries should be stored - # in. Default: log - #document_type: log - - # Scan frequency in seconds. - # How often these files should be checked for changes. In case it is set - # to 0s, it is done as often as possible. Default: 10s - #scan_frequency: 10s - - # Defines the buffer size every harvester uses when fetching the file - #harvester_buffer_size: 16384 - - # Maximum number of bytes a single log event can have - # All bytes after max_bytes are discarded and not sent. The default is 10MB. - # This is especially useful for multiline log messages which can get large. - #max_bytes: 10485760 - - # Mutiline can be used for log messages spanning multiple lines. This is common - # for Java Stack Traces or C-Line Continuation - - # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ - #multiline.pattern: ^\[ - - # Defines if the pattern set under pattern should be negated or not. Default is false. - #multiline.negate: false - - # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern - # that was (not) matched before or after or as long as a pattern is not matched based on negate. - # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash - #multiline.match: after - - # The maximum number of lines that are combined to one event. - # In case there are more the max_lines the additional lines are discarded. - # Default is 500 - #multiline.max_lines: 500 - - # After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event - # Default is 5s. - #multiline.timeout: 5s - - # Setting tail_files to true means filebeat starts reading new files at the end - # instead of the beginning. If this is used in combination with log rotation - # this can mean that the first entries of a new file are skipped. - #tail_files: false - - # Backoff values define how aggressively filebeat crawls new files for updates - # The default values can be used in most cases. Backoff defines how long it is waited - # to check a file again after EOF is reached. Default is 1s which means the file - # is checked every second if new lines were added. This leads to a near real time crawling. - # Every time a new line appears, backoff is reset to the initial value. - #backoff: 1s - - # Max backoff defines what the maximum backoff time is. After having backed off multiple times - # from checking the files, the waiting time will never exceed max_backoff independent of the - # backoff factor. 
Having it set to 10s means in the worst case a new line can be added to a log - # file after having backed off multiple times, it takes a maximum of 10s to read the new line - #max_backoff: 10s - - # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor, - # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen. - # The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached - #backoff_factor: 2 - - # This option closes a file, as soon as the file name changes. - # This config option is recommended on windows only. Filebeat keeps the files it's reading open. This can cause - # issues when the file is removed, as the file will not be fully removed until also Filebeat closes - # the reading. Filebeat closes the file handler after ignore_older. During this time no new file with the - # same name can be created. Turning this feature on the other hand can lead to loss of data - # on rotate files. It can happen that after file rotation the beginning of the new - # file is skipped, as the reading starts at the end. We recommend to leave this option on false - # but lower the ignore_older value to release files faster. - #force_close_files: false - - # Additional stdin prospector - - # Configuration to use stdin input - #- input_type: stdin - - -######################### Filebeat general configuration ############################# - -#filebeat: +# Each - is a prospector. Below are the prospector-specific configurations + +# Type of the files. This determines how the file is read. +# The different types cannot be mixed in one prospector +# +# Possible options are: +# * log: Reads every line of the log file (default) +# * stdin: Reads standard input + +#------------------------------ Log prospector -------------------------------- +- input_type: log + + # Paths that should be crawled and fetched. Glob-based paths. + # To fetch all ".log" files from a specific level of subdirectories + # /var/log/*/*.log can be used. + # For each file found under this path, a harvester is started. + # Make sure no file is defined twice as this can lead to unexpected behaviour. + paths: + - /var/log/*.log + #- c:\programdata\elasticsearch\logs\* + + # Configure the file encoding for reading files with international characters + # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding). + # Some sample encodings: + # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk, + # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ... + #encoding: plain + + # Decode JSON options. Enable this if your logs are structured in JSON. + # JSON key on which to apply the line filtering and multiline settings. This key + # must be top level and its value must be a string, otherwise it is ignored. If + # no text key is defined, the line filtering and multiline features cannot be used. + #json.message_key: + + # By default, the decoded JSON is placed under a "json" key in the output document. + # If you enable this setting, the keys are copied to the top level of the output document. + #json.keys_under_root: false + + # If keys_under_root and this setting are enabled, then the values from the decoded + # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) + # in case of conflicts.
+ #json.overwrite_keys: false + + # If this setting is enabled, Filebeat adds a "json_error" key in case of JSON + # unmarshaling errors or when a text key is defined in the configuration but cannot + # be used. + #json.add_error_key: false + + # Exclude lines. A list of regular expressions to match. It drops the lines that + # match any regular expression from the list. include_lines is applied before + # exclude_lines. By default, no lines are dropped. + #exclude_lines: ["^DBG"] + + # Include lines. A list of regular expressions to match. It exports the lines that + # match any regular expression from the list. include_lines is applied before + # exclude_lines. By default, all the lines are exported. + #include_lines: ["^ERR", "^WARN"] + + # Exclude files. A list of regular expressions to match. Filebeat drops the files that + # match any regular expression from the list. By default, no files are dropped. + #exclude_files: [".gz$"] + + # Optional additional fields. These fields can be freely chosen + # to add additional information to the crawled log files for filtering + #fields: + # level: debug + # review: 1 + + # Set to true to store the additional fields as top-level fields instead + # of under the "fields" sub-dictionary. In case of name conflicts with the + # fields added by Filebeat itself, the custom fields overwrite the default + # fields. + #fields_under_root: false + + # Ignore files which were modified more than the defined timespan in the past. + # ignore_older is disabled by default, so no files are ignored by setting it to 0. + # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + #ignore_older: 0 + + # close_older closes the file handler for files which were not modified + # for longer than close_older. + # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + #close_older: 1h + + # Type to be published in the 'type' field. For Elasticsearch output, + # the type defines the document type these entries should be stored + # in. Default: log + #document_type: log + + # Scan frequency in seconds. + # How often these files should be checked for changes. In case it is set + # to 0s, it is done as often as possible. Default: 10s + #scan_frequency: 10s + + # Defines the buffer size every harvester uses when fetching the file + #harvester_buffer_size: 16384 + + # Maximum number of bytes a single log event can have. + # All bytes after max_bytes are discarded and not sent. The default is 10MB. + # This is especially useful for multiline log messages, which can get large. + #max_bytes: 10485760 + + # Multiline can be used for log messages spanning multiple lines. This is common + # for Java stack traces or C line continuations - # Event count spool threshold - forces network flush if exceeded - #spool_size: 2048 - - # Enable async publisher pipeline in filebeat (Experimental!) - #publish_async: false + # The regexp pattern that has to be matched. The example pattern matches all lines starting with [ + #multiline.pattern: ^\[ + + # Defines if the pattern set under pattern should be negated or not. Default is false. + #multiline.negate: false + + # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern + # that was (not) matched before or after, or as long as a pattern is not matched based on negate. + # Note: after is equivalent to previous and before is equivalent to next in Logstash + #multiline.match: after + + # The maximum number of lines that are combined into one event. + # In case there are more than max_lines, the additional lines are discarded. + # Default is 500 + #multiline.max_lines: 500 + + # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event. + # Default is 5s. + #multiline.timeout: 5s
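+ + # As an illustration only (this pattern is hypothetical, not part of the shipped + # example file): a prospector reading logs whose entries begin with an ISO date + # could treat every line that does not start with a date as a continuation of + # the previous event, which joins stack traces into a single event: + #multiline.pattern: '^[[:digit:]]{4}-[[:digit:]]{2}-[[:digit:]]{2}' + #multiline.negate: true + #multiline.match: after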
+ + # Setting tail_files to true means Filebeat starts reading new files at the end + # instead of the beginning. If this is used in combination with log rotation, + # this can mean that the first entries of a new file are skipped. + #tail_files: false + + # Backoff values define how aggressively Filebeat checks files for updates. + # The default values can be used in most cases. Backoff defines how long Filebeat waits + # before checking a file again after EOF is reached. The default is 1s, which means the file + # is checked every second for new lines. This leads to near real-time crawling. + # Every time a new line appears, backoff is reset to the initial value. + #backoff: 1s + + # Max backoff defines what the maximum backoff time is. After having backed off multiple times + # from checking the files, the waiting time will never exceed max_backoff, independent of the + # backoff factor. Setting it to 10s means that, in the worst case, after Filebeat has backed + # off multiple times, it takes at most 10s for a newly added line to be read. + #max_backoff: 10s + + # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor, + # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen. + # The backoff value is multiplied by backoff_factor on each check until max_backoff is reached. + # For example, with backoff: 1s, backoff_factor: 2 and max_backoff: 10s, the wait grows + # 1s, 2s, 4s, 8s and is then capped at 10s. + #backoff_factor: 2 + + # This option closes a file as soon as the file name changes. + # This config option is recommended on Windows only. Filebeat keeps the files it's reading open. This can cause + # issues when the file is removed, as the file will not be fully removed until Filebeat also closes + # its reader. Filebeat closes the file handler after ignore_older. During this time no new file with the + # same name can be created. Turning this feature on, on the other hand, can lead to loss of data + # on rotated files. It can happen that after file rotation the beginning of the new + # file is skipped, as the reading starts at the end. We recommend leaving this option set to false + # but lowering the ignore_older value to release files faster. + #force_close_files: false + +#----------------------------- Stdin prospector ------------------------------- +# Configuration to use stdin input +#- input_type: stdin + +#========================= Filebeat global options ============================ + +# Event count spool threshold - forces network flush if exceeded +#filebeat.spool_size: 2048 - # Defines how often the spooler is flushed. After idle_timeout the spooler is - # Flush even though spool_size is not reached. - #idle_timeout: 5s +# Enable async publisher pipeline in filebeat (Experimental!) +#filebeat.publish_async: false - # Name of the registry file. If a relative path is used, it is considered relative to the - # data path. - #registry_file: registry +# Defines how often the spooler is flushed. After idle_timeout the spooler is +# flushed even though spool_size is not reached. +#filebeat.idle_timeout: 5s - # Full Path to directory with additional prospector configuration files. Each file must end with .yml - # These config files must have the full filebeat config part inside, but only - # the prospector part is processed.
All global options like spool_size are ignored. - # The config_dir MUST point to a different directory then where the main filebeat config file is in. - #config_dir: +# Name of the registry file. If a relative path is used, it is considered relative to the +# data path. +#filebeat.registry_file: registry +# Full path to directory with additional prospector configuration files. Each file must end with .yml +# These config files must have the full filebeat config part inside, but only +# the prospector part is processed. All global options like spool_size are ignored. +# The config_dir MUST point to a different directory than the one where the main filebeat config file is. +#filebeat.config_dir: diff --git a/filebeat/filebeat.yml b/filebeat/filebeat.yml index 42977702da5..959747339fd 100644 --- a/filebeat/filebeat.yml +++ b/filebeat/filebeat.yml @@ -1,205 +1,243 @@ -################### Filebeat Configuration Example ######################### +###################### Filebeat Configuration Example ######################### -######################### Filebeat prospectors ############################# +#=========================== Filebeat prospectors ============================= # List of prospectors to fetch data. filebeat.prospectors: - # Each - is a prospector. Below are the prospector specific configurations - - # Type of the files. Based on this the way the file is read is decided. - # The different types cannot be mixed in one prospector - # - # Possible options are: - # * log: Reads every line of the log file (default) - # * stdin: Reads the standard in - - input_type: log - - # Paths that should be crawled and fetched. Glob based paths. - # To fetch all ".log" files from a specific level of subdirectories - # /var/log/*/*.log can be used. - # For each file found under this path, a harvester is started. - # Make sure not file is defined twice as this can lead to unexpected behaviour. - paths: - - /var/log/*.log - #- c:\programdata\elasticsearch\logs\* - - # Configure the file encoding for reading files with international characters - # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding). - # Some sample encodings: - # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk, - # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ... - #encoding: plain - - # Decode JSON options. Enable this if your logs are structured in JSON. - # JSON key on which to apply the line filtering and multiline settings. This key - # must be top level and its value must be string, otherwise it is ignored. If - # no text key is defined, the line filtering and multiline features cannot be used. - #json.message_key: - - # By default, the decoded JSON is placed under a "json" key in the output document. - # If you enable this setting, the keys are copied top level in the output document. - #json.keys_under_root: false - - # If keys_under_root and this setting are enabled, then the values from the decoded - # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) - # in case of conflicts. - #json.overwrite_keys: false - - # If this setting is enabled, Filebeat adds a "json_error" key in case of JSON - # unmarshaling errors or when a text key is defined in the configuration but cannot - # be used. - #json.add_error_key: false - - # Exclude lines. A list of regular expressions to match. It drops the lines that are - # matching any regular expression from the list. The include_lines is called before - # exclude_lines. By default, no lines are dropped. - #exclude_lines: ["^DBG"] - - # Include lines. A list of regular expressions to match.
It exports the lines that are - # matching any regular expression from the list. The include_lines is called before - # exclude_lines. By default, all the lines are exported. - #include_lines: ["^ERR", "^WARN"] - - # Exclude files. A list of regular expressions to match. Filebeat drops the files that - # are matching any regular expression from the list. By default, no files are dropped. - #exclude_files: [".gz$"] - - # Optional additional fields. These field can be freely picked - # to add additional information to the crawled log files for filtering - #fields: - # level: debug - # review: 1 - - # Set to true to store the additional fields as top level fields instead - # of under the "fields" sub-dictionary. In case of name conflicts with the - # fields added by Filebeat itself, the custom fields overwrite the default - # fields. - #fields_under_root: false - - # Ignore files which were modified more then the defined timespan in the past. - # ignore_older is disabled by default, so no files are ignored by setting it to 0. - # Time strings like 2h (2 hours), 5m (5 minutes) can be used. - #ignore_older: 0 - - # Close older closes the file handler for which were not modified - # for longer then close_older - # Time strings like 2h (2 hours), 5m (5 minutes) can be used. - #close_older: 1h - - # Type to be published in the 'type' field. For Elasticsearch output, - # the type defines the document type these entries should be stored - # in. Default: log - #document_type: log - - # Scan frequency in seconds. - # How often these files should be checked for changes. In case it is set - # to 0s, it is done as often as possible. Default: 10s - #scan_frequency: 10s - - # Defines the buffer size every harvester uses when fetching the file - #harvester_buffer_size: 16384 - - # Maximum number of bytes a single log event can have - # All bytes after max_bytes are discarded and not sent. The default is 10MB. - # This is especially useful for multiline log messages which can get large. - #max_bytes: 10485760 - - # Mutiline can be used for log messages spanning multiple lines. This is common - # for Java Stack Traces or C-Line Continuation - - # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ - #multiline.pattern: ^\[ - - # Defines if the pattern set under pattern should be negated or not. Default is false. - #multiline.negate: false - - # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern - # that was (not) matched before or after or as long as a pattern is not matched based on negate. - # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash - #multiline.match: after - - # The maximum number of lines that are combined to one event. - # In case there are more the max_lines the additional lines are discarded. - # Default is 500 - #multiline.max_lines: 500 - - # After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event - # Default is 5s. - #multiline.timeout: 5s - - # Setting tail_files to true means filebeat starts reading new files at the end - # instead of the beginning. If this is used in combination with log rotation - # this can mean that the first entries of a new file are skipped. - #tail_files: false - - # Backoff values define how aggressively filebeat crawls new files for updates - # The default values can be used in most cases. Backoff defines how long it is waited - # to check a file again after EOF is reached. 
Default is 1s which means the file - # is checked every second if new lines were added. This leads to a near real time crawling. - # Every time a new line appears, backoff is reset to the initial value. - #backoff: 1s - - # Max backoff defines what the maximum backoff time is. After having backed off multiple times - # from checking the files, the waiting time will never exceed max_backoff independent of the - # backoff factor. Having it set to 10s means in the worst case a new line can be added to a log - # file after having backed off multiple times, it takes a maximum of 10s to read the new line - #max_backoff: 10s - - # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor, - # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen. - # The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached - #backoff_factor: 2 - - # This option closes a file, as soon as the file name changes. - # This config option is recommended on windows only. Filebeat keeps the files it's reading open. This can cause - # issues when the file is removed, as the file will not be fully removed until also Filebeat closes - # the reading. Filebeat closes the file handler after ignore_older. During this time no new file with the - # same name can be created. Turning this feature on the other hand can lead to loss of data - # on rotate files. It can happen that after file rotation the beginning of the new - # file is skipped, as the reading starts at the end. We recommend to leave this option on false - # but lower the ignore_older value to release files faster. - #force_close_files: false - - # Additional stdin prospector - - # Configuration to use stdin input - #- input_type: stdin - - -######################### Filebeat general configuration ############################# - -#filebeat: +# Each - is a prospector. Below are the prospector-specific configurations + +# Type of the files. This determines how the file is read. +# The different types cannot be mixed in one prospector +# +# Possible options are: +# * log: Reads every line of the log file (default) +# * stdin: Reads standard input + +#------------------------------ Log prospector -------------------------------- +- input_type: log + + # Paths that should be crawled and fetched. Glob-based paths. + # To fetch all ".log" files from a specific level of subdirectories + # /var/log/*/*.log can be used. + # For each file found under this path, a harvester is started. + # Make sure no file is defined twice as this can lead to unexpected behaviour. + paths: + - /var/log/*.log + #- c:\programdata\elasticsearch\logs\* + + # Configure the file encoding for reading files with international characters + # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding). + # Some sample encodings: + # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk, + # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ... + #encoding: plain + + # Decode JSON options. Enable this if your logs are structured in JSON. + # JSON key on which to apply the line filtering and multiline settings. This key + # must be top level and its value must be a string, otherwise it is ignored. If + # no text key is defined, the line filtering and multiline features cannot be used. + #json.message_key: + + # By default, the decoded JSON is placed under a "json" key in the output document. + # If you enable this setting, the keys are copied to the top level of the output document. + #json.keys_under_root: false + + # If keys_under_root and this setting are enabled, then the values from the decoded + # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) + # in case of conflicts. + #json.overwrite_keys: false + + # If this setting is enabled, Filebeat adds a "json_error" key in case of JSON + # unmarshaling errors or when a text key is defined in the configuration but cannot + # be used. + #json.add_error_key: false
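+ + # Illustrative sketch only (the key name "msg" is hypothetical, not part of the + # shipped example file): for log lines such as {"level":"info","msg":"service started"}, + # the options above might be combined as: + #json.message_key: msg + #json.keys_under_root: true + #json.add_error_key: true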
+ + # Exclude lines. A list of regular expressions to match. It drops the lines that + # match any regular expression from the list. include_lines is applied before + # exclude_lines. By default, no lines are dropped. + #exclude_lines: ["^DBG"] + + # Include lines. A list of regular expressions to match. It exports the lines that + # match any regular expression from the list. include_lines is applied before + # exclude_lines. By default, all the lines are exported. + #include_lines: ["^ERR", "^WARN"] + + # Exclude files. A list of regular expressions to match. Filebeat drops the files that + # match any regular expression from the list. By default, no files are dropped. + #exclude_files: [".gz$"] + + # Optional additional fields. These fields can be freely chosen + # to add additional information to the crawled log files for filtering + #fields: + # level: debug + # review: 1 + + # Set to true to store the additional fields as top-level fields instead + # of under the "fields" sub-dictionary. In case of name conflicts with the + # fields added by Filebeat itself, the custom fields overwrite the default + # fields. + #fields_under_root: false + + # Ignore files which were modified more than the defined timespan in the past. + # ignore_older is disabled by default, so no files are ignored by setting it to 0. + # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + #ignore_older: 0 + + # close_older closes the file handler for files which were not modified + # for longer than close_older. + # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + #close_older: 1h + + # Type to be published in the 'type' field. For Elasticsearch output, + # the type defines the document type these entries should be stored + # in. Default: log + #document_type: log + + # Scan frequency in seconds. + # How often these files should be checked for changes. In case it is set + # to 0s, it is done as often as possible. Default: 10s + #scan_frequency: 10s + + # Defines the buffer size every harvester uses when fetching the file + #harvester_buffer_size: 16384 + + # Maximum number of bytes a single log event can have. + # All bytes after max_bytes are discarded and not sent. The default is 10MB. + # This is especially useful for multiline log messages, which can get large. + #max_bytes: 10485760 + + # Multiline can be used for log messages spanning multiple lines. This is common + # for Java stack traces or C line continuations - # Event count spool threshold - forces network flush if exceeded - #spool_size: 2048 + # The regexp pattern that has to be matched. The example pattern matches all lines starting with [ + #multiline.pattern: ^\[ + + # Defines if the pattern set under pattern should be negated or not. Default is false. + #multiline.negate: false + + # Match can be set to "after" or "before".
It is used to define if lines should be appended to a pattern + # that was (not) matched before or after, or as long as a pattern is not matched based on negate. + # Note: after is equivalent to previous and before is equivalent to next in Logstash + #multiline.match: after + + # The maximum number of lines that are combined into one event. + # In case there are more than max_lines, the additional lines are discarded. + # Default is 500 + #multiline.max_lines: 500 + + # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event. + # Default is 5s. + #multiline.timeout: 5s + + # Setting tail_files to true means Filebeat starts reading new files at the end + # instead of the beginning. If this is used in combination with log rotation, + # this can mean that the first entries of a new file are skipped. + #tail_files: false + + # Backoff values define how aggressively Filebeat checks files for updates. + # The default values can be used in most cases. Backoff defines how long Filebeat waits + # before checking a file again after EOF is reached. The default is 1s, which means the file + # is checked every second for new lines. This leads to near real-time crawling. + # Every time a new line appears, backoff is reset to the initial value. + #backoff: 1s + + # Max backoff defines what the maximum backoff time is. After having backed off multiple times + # from checking the files, the waiting time will never exceed max_backoff, independent of the + # backoff factor. Setting it to 10s means that, in the worst case, after Filebeat has backed + # off multiple times, it takes at most 10s for a newly added line to be read. + #max_backoff: 10s + + # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor, + # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen. + # The backoff value is multiplied by backoff_factor on each check until max_backoff is reached. + # For example, with backoff: 1s, backoff_factor: 2 and max_backoff: 10s, the wait grows + # 1s, 2s, 4s, 8s and is then capped at 10s. + #backoff_factor: 2 + + # This option closes a file as soon as the file name changes. + # This config option is recommended on Windows only. Filebeat keeps the files it's reading open. This can cause + # issues when the file is removed, as the file will not be fully removed until Filebeat also closes + # its reader. Filebeat closes the file handler after ignore_older. During this time no new file with the + # same name can be created. Turning this feature on, on the other hand, can lead to loss of data + # on rotated files. It can happen that after file rotation the beginning of the new + # file is skipped, as the reading starts at the end. We recommend leaving this option set to false + # but lowering the ignore_older value to release files faster. + #force_close_files: false + +#----------------------------- Stdin prospector ------------------------------- +# Configuration to use stdin input +#- input_type: stdin + +#========================= Filebeat global options ============================ + +# Event count spool threshold - forces network flush if exceeded +#filebeat.spool_size: 2048 + +# Enable async publisher pipeline in filebeat (Experimental!) +#filebeat.publish_async: false - # Enable async publisher pipeline in filebeat (Experimental!) - #publish_async: false +# Defines how often the spooler is flushed.
After idle_timeout the spooler is +# flushed even though spool_size is not reached. +#filebeat.idle_timeout: 5s - # Defines how often the spooler is flushed. After idle_timeout the spooler is - # Flush even though spool_size is not reached. - #idle_timeout: 5s +# Name of the registry file. If a relative path is used, it is considered relative to the +# data path. +#filebeat.registry_file: registry - # Name of the registry file. If a relative path is used, it is considered relative to the - # data path. - #registry_file: registry +# Full path to directory with additional prospector configuration files. Each file must end with .yml +# These config files must have the full filebeat config part inside, but only +# the prospector part is processed. All global options like spool_size are ignored. +# The config_dir MUST point to a different directory than the one where the main filebeat config file is. +#filebeat.config_dir: +#================================ General ===================================== - # Full Path to directory with additional prospector configuration files. Each file must end with .yml - # These config files must have the full filebeat config part inside, but only - # the prospector part is processed. All global options like spool_size are ignored. - # The config_dir MUST point to a different directory then where the main filebeat config file is in. - #config_dir: -############################################################################### -############################# Libbeat Config ################################## -# Base config file used by all other beats for using libbeat features +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this option is not defined, the hostname is used. +#name: -############################# Output ########################################## +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. +#fields_under_root: false + +# Uncomment the following if you want to ignore transactions created +# by the server on which the shipper is installed. This option is useful +# to remove duplicates if shippers are installed on multiple servers. +#ignore_outgoing: true + +# How often (in seconds) shippers publish their IPs to the topology map. +# The default is 10 seconds. +#refresh_topology_freq: 10 + +# Expiration time (in seconds) of the IPs published by a shipper to the topology map. +# All the IPs will be deleted afterwards. Note that the value must be higher than +# refresh_topology_freq. The default is 15 seconds. +#topology_expire: 15 + +# Internal queue size for single events in processing pipeline +#queue_size: 1000 + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Outputs ===================================== # Configure what outputs to use when sending the data collected by the beat. # Multiple outputs may be used. -### Elasticsearch as output +#-------------------------- Elasticsearch output ------------------------------ output.elasticsearch: # Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (http and 9200) @@ -297,6 +335,7 @@ output.elasticsearch: #tls.max_version: 1.2 +#----------------------------- Logstash output -------------------------------- ### Logstash as output #output.logstash: # The Logstash hosts @@ -344,7 +383,7 @@ output.elasticsearch: #tls.curve_types: [] -### File as output +#------------------------------- File output ---------------------------------- #output.file: # Path to the directory where to save the generated files. The option is mandatory. #path: "/tmp/filebeat" @@ -362,70 +401,16 @@ output.elasticsearch: #number_of_files: 7 -### Console output +#----------------------------- Console output --------------------------------- #output.console: # Pretty print json event #pretty: false - -############################# General ######################################### - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. -#name: - -# The tags of the shipper are included in their own field with each -# transaction published. Tags make it easy to group servers by different -# logical properties. -#tags: ["service-X", "web-tier"] - -# Optional fields that you can specify to add additional information to the -# output. Fields can be scalar values, arrays, dictionaries, or any nested -# combination of these. -#fields: -# env: staging - -# If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields -# sub-dictionary. Default is false. -#fields_under_root: false - -# Uncomment the following if you want to ignore transactions created -# by the server on which the shipper is installed. This option is useful -# to remove duplicates if shippers are installed on multiple servers. -#ignore_outgoing: true - -# How often (in seconds) shippers are publishing their IPs to the topology map. -# The default is 10 seconds. -#refresh_topology_freq: 10 - -# Expiration time (in seconds) of the IPs published by a shipper to the topology map. -# All the IPs will be deleted afterwards. Note, that the value must be higher than -# refresh_topology_freq. The default is 15 seconds. -#topology_expire: 15 - -# Internal queue size for single events in processing pipeline -#queue_size: 1000 - -# Sets the maximum number of CPUs that can be executing simultaneously. The -# default is the number of logical CPUs available in the system. -#max_procs: - -############################# Logging ######################################### - +#================================ Logging ===================================== # There are three options for the log output: syslog, file, stderr. # Under Windows systems, the log files are per default sent to the file output, # under all other system per default to syslog. -#logging: - - # Send all logging output to syslog. On Windows default is false, otherwise - # default is true. - #to_syslog: true - # Write all logging output to files. Beats automatically rotate files if rotateeverybytes - # limit is reached. - #to_files: false # Enable debug output for selected components. To enable all selectors use ["*"] # Other available selectors are beat, publish, service @@ -436,10 +421,15 @@ output.elasticsearch: # Available log levels are: critical, error, warning, info, debug #level: error +# Send all logging output to syslog. The default is false. 
+#logging.to_syslog: true -# To enable logging to files, to_files option has to be set to true -# The directory where the log files will written to. +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +logging.to_files: true logging.files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). #path: /var/log/mybeat # The name of the files where the logs are written to. @@ -447,7 +437,7 @@ logging.files: # Configure log file size limit. If limit is reached, log file will be # automatically rotated - rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 10485760 # = 10MB # Number of rotated log files to keep. Oldest files will be deleted first. #keepfiles: 7 diff --git a/libbeat/etc/libbeat.yml b/libbeat/etc/libbeat.yml index 7dccf6ddf47..2898ce946fb 100644 --- a/libbeat/etc/libbeat.yml +++ b/libbeat/etc/libbeat.yml @@ -1,13 +1,54 @@ -############################################################################### -############################# Libbeat Config ################################## -# Base config file used by all other beats for using libbeat features +#================================ General ===================================== -############################# Output ########################################## + + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this option is not defined, the hostname is used. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. +#fields_under_root: false + +# Uncomment the following if you want to ignore transactions created +# by the server on which the shipper is installed. This option is useful +# to remove duplicates if shippers are installed on multiple servers. +#ignore_outgoing: true + +# How often (in seconds) shippers publish their IPs to the topology map. +# The default is 10 seconds. +#refresh_topology_freq: 10 + +# Expiration time (in seconds) of the IPs published by a shipper to the topology map. +# All the IPs will be deleted afterwards. Note that the value must be higher than +# refresh_topology_freq. The default is 15 seconds. +#topology_expire: 15 + +# Internal queue size for single events in processing pipeline +#queue_size: 1000 + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Outputs ===================================== # Configure what outputs to use when sending the data collected by the beat. # Multiple outputs may be used. -### Elasticsearch as output +#-------------------------- Elasticsearch output ------------------------------ output.elasticsearch: # Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (http and 9200) @@ -105,6 +146,7 @@ output.elasticsearch: #tls.max_version: 1.2 +#----------------------------- Logstash output -------------------------------- ### Logstash as output #output.logstash: # The Logstash hosts @@ -152,7 +194,7 @@ output.elasticsearch: #tls.curve_types: [] -### File as output +#------------------------------- File output ---------------------------------- #output.file: # Path to the directory where to save the generated files. The option is mandatory. #path: "/tmp/beatname" @@ -170,70 +212,16 @@ output.elasticsearch: #number_of_files: 7 -### Console output +#----------------------------- Console output --------------------------------- #output.console: # Pretty print json event #pretty: false - -############################# General ######################################### - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. -#name: - -# The tags of the shipper are included in their own field with each -# transaction published. Tags make it easy to group servers by different -# logical properties. -#tags: ["service-X", "web-tier"] - -# Optional fields that you can specify to add additional information to the -# output. Fields can be scalar values, arrays, dictionaries, or any nested -# combination of these. -#fields: -# env: staging - -# If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields -# sub-dictionary. Default is false. -#fields_under_root: false - -# Uncomment the following if you want to ignore transactions created -# by the server on which the shipper is installed. This option is useful -# to remove duplicates if shippers are installed on multiple servers. -#ignore_outgoing: true - -# How often (in seconds) shippers are publishing their IPs to the topology map. -# The default is 10 seconds. -#refresh_topology_freq: 10 - -# Expiration time (in seconds) of the IPs published by a shipper to the topology map. -# All the IPs will be deleted afterwards. Note, that the value must be higher than -# refresh_topology_freq. The default is 15 seconds. -#topology_expire: 15 - -# Internal queue size for single events in processing pipeline -#queue_size: 1000 - -# Sets the maximum number of CPUs that can be executing simultaneously. The -# default is the number of logical CPUs available in the system. -#max_procs: - -############################# Logging ######################################### - +#================================ Logging ===================================== # There are three options for the log output: syslog, file, stderr. # Under Windows systems, the log files are per default sent to the file output, # under all other system per default to syslog. -#logging: - - # Send all logging output to syslog. On Windows default is false, otherwise - # default is true. - #to_syslog: true - # Write all logging output to files. Beats automatically rotate files if rotateeverybytes - # limit is reached. - #to_files: false # Enable debug output for selected components. To enable all selectors use ["*"] # Other available selectors are beat, publish, service @@ -244,10 +232,15 @@ output.elasticsearch: # Available log levels are: critical, error, warning, info, debug #level: error +# Send all logging output to syslog. The default is false. 
+#logging.to_syslog: true -# To enable logging to files, to_files option has to be set to true -# The directory where the log files will written to. +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +logging.to_files: true logging.files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). #path: /var/log/mybeat # The name of the files where the logs are written to. @@ -255,7 +248,7 @@ logging.files: # Configure log file size limit. If limit is reached, log file will be # automatically rotated - rotateeverybytes: 10485760 # = 10MB + #rotateeverybytes: 10485760 # = 10MB # Number of rotated log files to keep. Oldest files will be deleted first. #keepfiles: 7 diff --git a/metricbeat/etc/beat.yml b/metricbeat/etc/beat.yml index 734b51f9ff2..70ad61b95d4 100644 --- a/metricbeat/etc/beat.yml +++ b/metricbeat/etc/beat.yml @@ -1,85 +1,87 @@ -metricbeat.modules: +###################### Metricbeat Configuration Example ####################### - # Apache Module - - module: apache - metricsets: ["status"] - enabled: true - period: 1s +#========================== Modules configuration ============================ +metricbeat.modules: - # Apache hosts - hosts: ["http://127.0.0.1/"] +#----------------------------- Apache Module ---------------------------------- +- module: apache + metricsets: ["status"] + enabled: true + period: 1s - # Path to server status. Default server-status - #server_status_path: "server-status" + # Apache hosts + hosts: ["http://127.0.0.1/"] - # Username of hosts. Empty by default - #username: test + # Path to server status. Default server-status + #server_status_path: "server-status" - # Password of hosts. Empty by default - #password: test123 + # Username of hosts. Empty by default + #username: test - # MySQL Module - - module: mysql - metricsets: ["status"] - enabled: true - period: 2s + # Password of hosts. Empty by default + #password: test123 - # Host DSN should be defined as "tcp(127.0.0.1:3306)/" - # The username and password can either be set in the DSN or for all hosts in username and password config option - hosts: ["root@tcp(127.0.0.1:3306)/"] +#------------------------------- MySQL Module --------------------------------- +- module: mysql + metricsets: ["status"] + enabled: true + period: 2s - # Username of hosts. Empty by default - #username: root + # Host DSN should be defined as "tcp(127.0.0.1:3306)/" + # The username and password can either be set in the DSN or for all hosts in username and password config option + hosts: ["root@tcp(127.0.0.1:3306)/"] - # Password of hosts. Empty by default - #password: test + # Username of hosts. Empty by default + #username: root - # Redis Module - - module: redis - metricsets: ["info"] - enabled: true - period: 1s + # Password of hosts. Empty by default + #password: test - # Redis hosts - hosts: ["127.0.0.1:6379"] +#------------------------------- Redis Module --------------------------------- +- module: redis + metricsets: ["info"] + enabled: true + period: 1s - # Enabled defines if the module is enabled. Default: true - #enabled: true + # Redis hosts + hosts: ["127.0.0.1:6379"] - # Timeout after which time a metricset should return an error - # Timeout is by default defined as period, as a fetch of a metricset - # should never take longer then period, as otherwise calls can pile up. - #timeout: 1s + # Enabled defines if the module is enabled.
Default: true + #enabled: true - # Optional fields to be added to each event - #fields: - # datacenter: west + # Timeout after which a metricset should return an error. + # Timeout is by default defined as period, as a fetch of a metricset + # should never take longer than period, as otherwise calls can pile up. + #timeout: 1s - # Network type to be used for redis connection. Default: tcp - #network: tcp + # Optional fields to be added to each event + #fields: + # datacenter: west - # Max number of concurrent connections. Default: 10 - #maxconn: 10 + # Network type to be used for redis connection. Default: tcp + #network: tcp - # Filters can be used to reduce the number of fields sent. - #filters: - # - include_fields: - # fields: ["stats"] + # Max number of concurrent connections. Default: 10 + #maxconn: 10 - # Redis AUTH password. Empty by default. - #password: foobared + # Filters can be used to reduce the number of fields sent. + #filters: + # - include_fields: + # fields: ["stats"] - # System module - - module: system - metricsets: ["cpu", "cores", "filesystem", "fsstats", "memory", "process"] - enabled: true - period: 2s + # Redis AUTH password. Empty by default. + #password: foobared - # Zookeeper module - - module: zookeeper - metricsets: ["mntr"] - enabled: true - period: 5s - hosts: ["localhost:2181"] +#------------------------------ System Module --------------------------------- +- module: system + metricsets: ["cpu", "cores", "filesystem", "fsstats", "memory", "process"] + enabled: true + period: 2s +#---------------------------- Zookeeper Module -------------------------------- +- module: zookeeper + metricsets: ["mntr"] + enabled: true + period: 5s + hosts: ["localhost:2181"] diff --git a/metricbeat/metricbeat.yml b/metricbeat/metricbeat.yml index 18afddcfe54..84f75ff2477 100644 --- a/metricbeat/metricbeat.yml +++ b/metricbeat/metricbeat.yml @@ -1,98 +1,141 @@ +###################### Metricbeat Configuration Example ####################### + +#========================== Modules configuration ============================ metricbeat.modules: - # Apache Module - - module: apache - metricsets: ["status"] - enabled: true - period: 1s +#----------------------------- Apache Module ---------------------------------- +- module: apache + metricsets: ["status"] + enabled: true + period: 1s + + # Apache hosts + hosts: ["http://127.0.0.1/"] - # Apache hosts - hosts: ["http://127.0.0.1/"] + # Path to server status. Default server-status + #server_status_path: "server-status" - # Path to server status. Default server-status - #server_status_path: "server-status" + # Username of hosts. Empty by default + #username: test - # Username of hosts. Empty by default - #username: test + # Password of hosts. Empty by default + #password: test123 - # Password of hosts. Empty by default - #password: test123 +#------------------------------- MySQL Module --------------------------------- +- module: mysql + metricsets: ["status"] + enabled: true + period: 2s - # MySQL Module - - module: mysql - metricsets: ["status"] - enabled: true - period: 2s + # Host DSN should be defined as "tcp(127.0.0.1:3306)/" + # The username and password can either be set in the DSN or for all hosts in username and password config option + hosts: ["root@tcp(127.0.0.1:3306)/"] - # Host DSN should be defined as "tcp(127.0.0.1:3306)/" - # The username and password can either be set in the DSN or for all hosts in username and password config option - hosts: ["root@tcp(127.0.0.1:3306)/"] + # Username of hosts.
Empty by default + #username: root - # Username of hosts. Empty by default - #username: root + # Password of hosts. Empty by default + #password: test - # Password of hosts. Empty by default - #password: test +#------------------------------- Redis Module --------------------------------- +- module: redis + metricsets: ["info"] + enabled: true + period: 1s - # Redis Module - - module: redis - metricsets: ["info"] - enabled: true - period: 1s + # Redis hosts + hosts: ["127.0.0.1:6379"] - # Redis hosts - hosts: ["127.0.0.1:6379"] + # Enabled defines if the module is enabled. Default: true + #enabled: true - # Enabled defines if the module is enabled. Default: true - #enabled: true + # Timeout after which a metricset should return an error. + # Timeout is by default defined as period, as a fetch of a metricset + # should never take longer than period, as otherwise calls can pile up. + #timeout: 1s - # Timeout after which time a metricset should return an error - # Timeout is by default defined as period, as a fetch of a metricset - # should never take longer then period, as otherwise calls can pile up. - #timeout: 1s + # Optional fields to be added to each event + #fields: + # datacenter: west - # Optional fields to be added to each event - #fields: - # datacenter: west + # Network type to be used for redis connection. Default: tcp + #network: tcp - # Network type to be used for redis connection. Default: tcp - #network: tcp + # Max number of concurrent connections. Default: 10 + #maxconn: 10 - # Max number of concurrent connections. Default: 10 - #maxconn: 10 + # Filters can be used to reduce the number of fields sent. + #filters: + # - include_fields: + # fields: ["stats"] - # Filters can be used to reduce the number of fields sent. - #filters: - # - include_fields: - # fields: ["stats"] + # Redis AUTH password. Empty by default. + #password: foobared - # Redis AUTH password. Empty by default. - #password: foobared +#------------------------------ System Module --------------------------------- +- module: system + metricsets: ["cpu", "cores", "filesystem", "fsstats", "memory", "process"] + enabled: true + period: 2s - # System module - - module: system - metricsets: ["cpu", "cores", "filesystem", "fsstats", "memory", "process"] - enabled: true - period: 2s +#---------------------------- Zookeeper Module -------------------------------- +- module: zookeeper + metricsets: ["mntr"] + enabled: true + period: 5s + hosts: ["localhost:2181"] - # Zookeeper module - - module: zookeeper - metricsets: ["mntr"] - enabled: true - period: 5s - hosts: ["localhost:2181"] +#================================ General ===================================== -############################################################################### -############################# Libbeat Config ################################## -# Base config file used by all other beats for using libbeat features +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this option is not defined, the hostname is used. +#name: -############################# Output ########################################## +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output.
+# Uncomment the following if you want to ignore transactions created
+# by the server on which the shipper is installed. This option is useful
+# to remove duplicates if shippers are installed on multiple servers.
+#ignore_outgoing: true
+
+# How often (in seconds) shippers publish their IPs to the topology map.
+# The default is 10 seconds.
+#refresh_topology_freq: 10
+
+# Expiration time (in seconds) of the IPs published by a shipper to the topology map.
+# All the IPs will be deleted afterwards. Note that the value must be higher than
+# refresh_topology_freq. The default is 15 seconds.
+#topology_expire: 15
+
+# Internal queue size for single events in processing pipeline
+#queue_size: 1000
+
+# Sets the maximum number of CPUs that can be executing simultaneously. The
+# default is the number of logical CPUs available in the system.
+#max_procs:
+
+#================================ Outputs =====================================

 # Configure what outputs to use when sending the data collected by the beat.
 # Multiple outputs may be used.

-### Elasticsearch as output
+#-------------------------- Elasticsearch output ------------------------------
 output.elasticsearch:
   # Array of hosts to connect to.
   # Scheme and port can be left out and will be set to the default (http and 9200)
@@ -190,6 +233,7 @@ output.elasticsearch:
   #tls.max_version: 1.2


+#----------------------------- Logstash output --------------------------------
 ### Logstash as output
 #output.logstash:
   # The Logstash hosts
@@ -237,7 +281,7 @@ output.elasticsearch:
   #tls.curve_types: []


-### File as output
+#------------------------------- File output ----------------------------------
 #output.file:
   # Path to the directory where to save the generated files. The option is mandatory.
   #path: "/tmp/metricbeat"
@@ -255,70 +299,16 @@ output.elasticsearch:
   #number_of_files: 7


-### Console output
+#----------------------------- Console output ---------------------------------
 #output.console:
   # Pretty print json event
   #pretty: false

-
-############################# General #########################################
-
-# The name of the shipper that publishes the network data. It can be used to group
-# all the transactions sent by a single shipper in the web interface.
-# If this options is not defined, the hostname is used.
-#name:
-
-# The tags of the shipper are included in their own field with each
-# transaction published. Tags make it easy to group servers by different
-# logical properties.
-#tags: ["service-X", "web-tier"]
-
-# Optional fields that you can specify to add additional information to the
-# output. Fields can be scalar values, arrays, dictionaries, or any nested
-# combination of these.
-#fields:
-#  env: staging
-
-# If this option is set to true, the custom fields are stored as top-level
-# fields in the output document instead of being grouped under a fields
-# sub-dictionary. Default is false.
-#fields_under_root: false
-
-# Uncomment the following if you want to ignore transactions created
-# by the server on which the shipper is installed. This option is useful
-# to remove duplicates if shippers are installed on multiple servers.
-#ignore_outgoing: true
-
-# How often (in seconds) shippers are publishing their IPs to the topology map.
-# The default is 10 seconds.
-#refresh_topology_freq: 10
-
-# Expiration time (in seconds) of the IPs published by a shipper to the topology map.
-# All the IPs will be deleted afterwards. Note, that the value must be higher than
-# refresh_topology_freq. The default is 15 seconds.
-#topology_expire: 15
-
-# Internal queue size for single events in processing pipeline
-#queue_size: 1000
-
-# Sets the maximum number of CPUs that can be executing simultaneously. The
-# default is the number of logical CPUs available in the system.
-#max_procs:
-
-############################# Logging #########################################
-
+#================================ Logging =====================================

 # There are three options for the log output: syslog, file, stderr.
 # Under Windows systems, the log files are per default sent to the file output,
 # under all other system per default to syslog.
-#logging:
-
-  # Send all logging output to syslog. On Windows default is false, otherwise
-  # default is true.
-  #to_syslog: true

-  # Write all logging output to files. Beats automatically rotate files if rotateeverybytes
-  # limit is reached.
-  #to_files: false

 # Enable debug output for selected components. To enable all selectors use ["*"]
 # Other available selectors are beat, publish, service
@@ -329,10 +319,15 @@ output.elasticsearch:
 # Available log levels are: critical, error, warning, info, debug
 #level: error

+# Send all logging output to syslog. The default is false.
+#logging.to_syslog: true

-# To enable logging to files, to_files option has to be set to true
-# The directory where the log files will written to.
+# Logging to rotating files. Set logging.to_files to false to disable logging to
+# files.
+logging.to_files: true
 logging.files:
+  # Configure the path where the logs are written. The default is the logs directory
+  # under the home path (the binary location).
   #path: /var/log/mybeat

   # The name of the files where the logs are written to.
@@ -340,7 +335,7 @@ logging.files:

   # Configure log file size limit. If limit is reached, log file will be
   # automatically rotated
-  rotateeverybytes: 10485760 # = 10MB
+  #rotateeverybytes: 10485760 # = 10MB

   # Number of rotated log files to keep. Oldest files will be deleted first.
   #keepfiles: 7

diff --git a/packetbeat/etc/beat.yml b/packetbeat/etc/beat.yml
index 4fb3fb02cc0..1418e6960ca 100644
--- a/packetbeat/etc/beat.yml
+++ b/packetbeat/etc/beat.yml
@@ -1,4 +1,4 @@
-################### Packetbeat Configuration Example ##########################
+###################### Packetbeat Configuration Example #######################

 # This file contains an overview of various configuration settings. Please consult
 # the docs at https://www.elastic.co/guide/en/beats/packetbeat/current/packetbeat-configuration.html
@@ -8,26 +8,22 @@
 # application components. It inserts meta-data about each transaction into
 # Elasticsearch.

-############################# Interfaces #########################################
+#================================ Interfaces ==================================

 # Select the network interfaces to sniff the data. You can use the "any"
 # keyword to sniff on all connected interfaces.
-packetbeat.interfaces:
-  device: any
+packetbeat.interfaces.device: any

+#================================== Flows =====================================

-############################# Flows ##############################################
+# Set network flow timeout. Flow is killed if no packet is received before being
+# timed out.
+packetbeat.flows.timeout: 30s

-packetbeat.flows:
-  # Set network flow timeout. Flow is killed if no packet is received before being
-  # timed out.
-  #timeout: 30s
+# Configure reporting period. If set to -1, only killed flows will be reported
+packetbeat.flows.period: 10s

-  # Configure reporting period. If set to -1, only killed flows will be reported
-  #period: 10s
-
-
-########################### Transaction protocols ################################
+#========================== Transaction protocols =============================

 packetbeat.protocols.icmp:
   # Enable ICMPv4 and ICMPv6 monitoring. Default: false
@@ -155,7 +151,7 @@ packetbeat.protocols.nfs:
   # the NFS protocol by commenting out the list of ports.
   ports: [2049]

-########################## Monitored processes ################################
+#=========================== Monitored processes ==============================

 # Configure the processes to be monitored and how to find them. If a process is
 # monitored then Packetbeat attempts to use it's name to fill in the `proc` and

diff --git a/packetbeat/packetbeat.yml b/packetbeat/packetbeat.yml
index 2677a22301f..2cad20fbff8 100644
--- a/packetbeat/packetbeat.yml
+++ b/packetbeat/packetbeat.yml
@@ -1,4 +1,4 @@
-################### Packetbeat Configuration Example ##########################
+###################### Packetbeat Configuration Example #######################

 # This file contains an overview of various configuration settings. Please consult
 # the docs at https://www.elastic.co/guide/en/beats/packetbeat/current/packetbeat-configuration.html
@@ -8,26 +8,22 @@
 # application components. It inserts meta-data about each transaction into
 # Elasticsearch.

-############################# Interfaces #########################################
+#================================ Interfaces ==================================

 # Select the network interfaces to sniff the data. You can use the "any"
 # keyword to sniff on all connected interfaces.
-packetbeat.interfaces:
-  device: any
+packetbeat.interfaces.device: any

+#================================== Flows =====================================

-############################# Flows ##############################################
+# Set network flow timeout. Flow is killed if no packet is received before being
+# timed out.
+packetbeat.flows.timeout: 30s

-packetbeat.flows:
-  # Set network flow timeout. Flow is killed if no packet is received before being
-  # timed out.
-  #timeout: 30s
+# Configure reporting period. If set to -1, only killed flows will be reported
+packetbeat.flows.period: 10s

-  # Configure reporting period. If set to -1, only killed flows will be reported
-  #period: 10s
-
-
-########################### Transaction protocols ################################
+#========================== Transaction protocols =============================

 packetbeat.protocols.icmp:
   # Enable ICMPv4 and ICMPv6 monitoring. Default: false
@@ -155,7 +151,7 @@ packetbeat.protocols.nfs:
   # the NFS protocol by commenting out the list of ports.
   ports: [2049]
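# NOTE: the flattened keys introduced above (packetbeat.interfaces.device,
# packetbeat.flows.timeout, packetbeat.flows.period) are assumed to be
# equivalent to the nested form, since the Beats config loader treats dots
# as path separators. A minimal sketch of the two spellings:
#
# packetbeat.flows.timeout: 30s
#
# packetbeat:
#   flows:
#     timeout: 30s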
-########################## Monitored processes ################################
+#=========================== Monitored processes ==============================

 # Configure the processes to be monitored and how to find them. If a process is
 # monitored then Packetbeat attempts to use it's name to fill in the `proc` and
@@ -179,16 +175,57 @@ packetbeat.protocols.nfs:
 #
 # - process: app
 #   cmdline_grep: gunicorn

-###############################################################################
-############################# Libbeat Config ##################################
-# Base config file used by all other beats for using libbeat features

+#================================ General =====================================
+
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+# If this option is not defined, the hostname is used.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published. Tags make it easy to group servers by different
+# logical properties.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output. Fields can be scalar values, arrays, dictionaries, or any nested
+# combination of these.
+#fields:
+#  env: staging
+
+# If this option is set to true, the custom fields are stored as top-level
+# fields in the output document instead of being grouped under a fields
+# sub-dictionary. Default is false.
+#fields_under_root: false
+
+# Uncomment the following if you want to ignore transactions created
+# by the server on which the shipper is installed. This option is useful
+# to remove duplicates if shippers are installed on multiple servers.
+#ignore_outgoing: true

-############################# Output ##########################################
+# How often (in seconds) shippers publish their IPs to the topology map.
+# The default is 10 seconds.
+#refresh_topology_freq: 10
+
+# Expiration time (in seconds) of the IPs published by a shipper to the topology map.
+# All the IPs will be deleted afterwards. Note that the value must be higher than
+# refresh_topology_freq. The default is 15 seconds.
+#topology_expire: 15
+
+# Internal queue size for single events in processing pipeline
+#queue_size: 1000
+
+# Sets the maximum number of CPUs that can be executing simultaneously. The
+# default is the number of logical CPUs available in the system.
+#max_procs:
+
+#================================ Outputs =====================================

 # Configure what outputs to use when sending the data collected by the beat.
 # Multiple outputs may be used.

-### Elasticsearch as output
+#-------------------------- Elasticsearch output ------------------------------
 output.elasticsearch:
   # Array of hosts to connect to.
   # Scheme and port can be left out and will be set to the default (http and 9200)
@@ -286,6 +323,7 @@ output.elasticsearch:
   #tls.max_version: 1.2


+#----------------------------- Logstash output --------------------------------
 ### Logstash as output
 #output.logstash:
   # The Logstash hosts
@@ -333,7 +371,7 @@ output.elasticsearch:
   #tls.curve_types: []


-### File as output
+#------------------------------- File output ----------------------------------
 #output.file:
   # Path to the directory where to save the generated files. The option is mandatory.
  #path: "/tmp/packetbeat"
@@ -351,70 +389,16 @@ output.elasticsearch:
   #number_of_files: 7


-### Console output
+#----------------------------- Console output ---------------------------------
 #output.console:
   # Pretty print json event
   #pretty: false

-
-############################# General #########################################
-
-# The name of the shipper that publishes the network data. It can be used to group
-# all the transactions sent by a single shipper in the web interface.
-# If this options is not defined, the hostname is used.
-#name:
-
-# The tags of the shipper are included in their own field with each
-# transaction published. Tags make it easy to group servers by different
-# logical properties.
-#tags: ["service-X", "web-tier"]
-
-# Optional fields that you can specify to add additional information to the
-# output. Fields can be scalar values, arrays, dictionaries, or any nested
-# combination of these.
-#fields:
-#  env: staging
-
-# If this option is set to true, the custom fields are stored as top-level
-# fields in the output document instead of being grouped under a fields
-# sub-dictionary. Default is false.
-#fields_under_root: false
-
-# Uncomment the following if you want to ignore transactions created
-# by the server on which the shipper is installed. This option is useful
-# to remove duplicates if shippers are installed on multiple servers.
-#ignore_outgoing: true
-
-# How often (in seconds) shippers are publishing their IPs to the topology map.
-# The default is 10 seconds.
-#refresh_topology_freq: 10
-
-# Expiration time (in seconds) of the IPs published by a shipper to the topology map.
-# All the IPs will be deleted afterwards. Note, that the value must be higher than
-# refresh_topology_freq. The default is 15 seconds.
-#topology_expire: 15
-
-# Internal queue size for single events in processing pipeline
-#queue_size: 1000
-
-# Sets the maximum number of CPUs that can be executing simultaneously. The
-# default is the number of logical CPUs available in the system.
-#max_procs:
-
-############################# Logging #########################################
-
+#================================ Logging =====================================

 # There are three options for the log output: syslog, file, stderr.
 # Under Windows systems, the log files are per default sent to the file output,
 # under all other system per default to syslog.
-#logging:
-
-  # Send all logging output to syslog. On Windows default is false, otherwise
-  # default is true.
-  #to_syslog: true

-  # Write all logging output to files. Beats automatically rotate files if rotateeverybytes
-  # limit is reached.
-  #to_files: false

 # Enable debug output for selected components. To enable all selectors use ["*"]
 # Other available selectors are beat, publish, service
@@ -425,10 +409,15 @@ output.elasticsearch:
 # Available log levels are: critical, error, warning, info, debug
 #level: error

+# Send all logging output to syslog. The default is false.
+#logging.to_syslog: true

-# To enable logging to files, to_files option has to be set to true
-# The directory where the log files will written to.
+# Logging to rotating files. Set logging.to_files to false to disable logging to
+# files.
+logging.to_files: true
 logging.files:
+  # Configure the path where the logs are written. The default is the logs directory
+  # under the home path (the binary location).
   #path: /var/log/mybeat

   # The name of the files where the logs are written to.
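# A minimal sketch tying the logging options in this section together
# (values mirror the commented defaults shown above and in the rotation
# options that follow):
#
#logging.to_syslog: false
#logging.to_files: true
#logging.files:
#  path: /var/log/mybeat
#  rotateeverybytes: 10485760  # rotate once a file reaches 10 MB
#  keepfiles: 7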
@@ -436,7 +425,7 @@ logging.files:

   # Configure log file size limit. If limit is reached, log file will be
   # automatically rotated
-  rotateeverybytes: 10485760 # = 10MB
+  #rotateeverybytes: 10485760 # = 10MB

   # Number of rotated log files to keep. Oldest files will be deleted first.
   #keepfiles: 7

diff --git a/topbeat/etc/beat.yml b/topbeat/etc/beat.yml
index e7e1122e72a..74f8a888d6e 100644
--- a/topbeat/etc/beat.yml
+++ b/topbeat/etc/beat.yml
@@ -1,6 +1,6 @@
-################### Topbeat Configuration Example #########################
+###################### Topbeat Configuration Example ##########################

-##################### Topbeat general configuration #######################
+#======================== Topbeat specific options ============================

 # In seconds, defines how often to read server statistics
 topbeat.period: 10
@@ -23,4 +23,3 @@ topbeat.stats:

   # cpu usage per core, by default is false
   cpu_per_core: false
-

diff --git a/topbeat/topbeat.yml b/topbeat/topbeat.yml
index 5a44f398cc7..6d2f419363b 100644
--- a/topbeat/topbeat.yml
+++ b/topbeat/topbeat.yml
@@ -1,6 +1,6 @@
-################### Topbeat Configuration Example #########################
+###################### Topbeat Configuration Example ##########################

-##################### Topbeat general configuration #######################
+#======================== Topbeat specific options ============================

 # In seconds, defines how often to read server statistics
 topbeat.period: 10
@@ -23,17 +23,57 @@ topbeat.stats:

   # cpu usage per core, by default is false
   cpu_per_core: false

+#================================ General =====================================

-
-###############################################################################
-############################# Libbeat Config ##################################
-# Base config file used by all other beats for using libbeat features

-############################# Output ##########################################
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+# If this option is not defined, the hostname is used.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published. Tags make it easy to group servers by different
+# logical properties.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output. Fields can be scalar values, arrays, dictionaries, or any nested
+# combination of these.
+#fields:
+#  env: staging
+
+# If this option is set to true, the custom fields are stored as top-level
+# fields in the output document instead of being grouped under a fields
+# sub-dictionary. Default is false.
+#fields_under_root: false
+
+# Uncomment the following if you want to ignore transactions created
+# by the server on which the shipper is installed. This option is useful
+# to remove duplicates if shippers are installed on multiple servers.
+#ignore_outgoing: true
+
+# How often (in seconds) shippers publish their IPs to the topology map.
+# The default is 10 seconds.
+#refresh_topology_freq: 10
+
+# Expiration time (in seconds) of the IPs published by a shipper to the topology map.
+# All the IPs will be deleted afterwards. Note that the value must be higher than
+# refresh_topology_freq. The default is 15 seconds.
+#topology_expire: 15 + +# Internal queue size for single events in processing pipeline +#queue_size: 1000 + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Outputs ===================================== # Configure what outputs to use when sending the data collected by the beat. # Multiple outputs may be used. -### Elasticsearch as output +#-------------------------- Elasticsearch output ------------------------------ output.elasticsearch: # Array of hosts to connect to. # Scheme and port can be left out and will be set to the default (http and 9200) @@ -131,6 +171,7 @@ output.elasticsearch: #tls.max_version: 1.2 +#----------------------------- Logstash output -------------------------------- ### Logstash as output #output.logstash: # The Logstash hosts @@ -178,7 +219,7 @@ output.elasticsearch: #tls.curve_types: [] -### File as output +#------------------------------- File output ---------------------------------- #output.file: # Path to the directory where to save the generated files. The option is mandatory. #path: "/tmp/topbeat" @@ -196,70 +237,16 @@ output.elasticsearch: #number_of_files: 7 -### Console output +#----------------------------- Console output --------------------------------- #output.console: # Pretty print json event #pretty: false - -############################# General ######################################### - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. -#name: - -# The tags of the shipper are included in their own field with each -# transaction published. Tags make it easy to group servers by different -# logical properties. -#tags: ["service-X", "web-tier"] - -# Optional fields that you can specify to add additional information to the -# output. Fields can be scalar values, arrays, dictionaries, or any nested -# combination of these. -#fields: -# env: staging - -# If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields -# sub-dictionary. Default is false. -#fields_under_root: false - -# Uncomment the following if you want to ignore transactions created -# by the server on which the shipper is installed. This option is useful -# to remove duplicates if shippers are installed on multiple servers. -#ignore_outgoing: true - -# How often (in seconds) shippers are publishing their IPs to the topology map. -# The default is 10 seconds. -#refresh_topology_freq: 10 - -# Expiration time (in seconds) of the IPs published by a shipper to the topology map. -# All the IPs will be deleted afterwards. Note, that the value must be higher than -# refresh_topology_freq. The default is 15 seconds. -#topology_expire: 15 - -# Internal queue size for single events in processing pipeline -#queue_size: 1000 - -# Sets the maximum number of CPUs that can be executing simultaneously. The -# default is the number of logical CPUs available in the system. -#max_procs: - -############################# Logging ######################################### - +#================================ Logging ===================================== # There are three options for the log output: syslog, file, stderr. 
 # Under Windows systems, the log files are per default sent to the file output,
 # under all other system per default to syslog.
-#logging:
-
-  # Send all logging output to syslog. On Windows default is false, otherwise
-  # default is true.
-  #to_syslog: true

-  # Write all logging output to files. Beats automatically rotate files if rotateeverybytes
-  # limit is reached.
-  #to_files: false

 # Enable debug output for selected components. To enable all selectors use ["*"]
 # Other available selectors are beat, publish, service
@@ -270,10 +257,15 @@ output.elasticsearch:
 # Available log levels are: critical, error, warning, info, debug
 #level: error

+# Send all logging output to syslog. The default is false.
+#logging.to_syslog: true

-# To enable logging to files, to_files option has to be set to true
-# The directory where the log files will written to.
+# Logging to rotating files. Set logging.to_files to false to disable logging to
+# files.
+logging.to_files: true
 logging.files:
+  # Configure the path where the logs are written. The default is the logs directory
+  # under the home path (the binary location).
   #path: /var/log/mybeat

   # The name of the files where the logs are written to.
@@ -281,7 +273,7 @@ logging.files:

   # Configure log file size limit. If limit is reached, log file will be
   # automatically rotated
-  rotateeverybytes: 10485760 # = 10MB
+  #rotateeverybytes: 10485760 # = 10MB

   # Number of rotated log files to keep. Oldest files will be deleted first.
   #keepfiles: 7

diff --git a/winlogbeat/etc/beat.yml b/winlogbeat/etc/beat.yml
index 59c479df8b6..d80ea07a2bc 100644
--- a/winlogbeat/etc/beat.yml
+++ b/winlogbeat/etc/beat.yml
@@ -1,18 +1,15 @@
-################### Winlogbeat Configuration Example #########################
+###################### Winlogbeat Configuration Example #######################

-##################### Winlogbeat general configuration ######################
+# The registry file is where Winlogbeat persists its state so that the beat
+# can resume after shutdown or an outage. The default is .winlogbeat.yml
+# in the directory in which it was started.
+#winlogbeat.registry_file: .winlogbeat.yml

-winlogbeat:
-  # The registry file is where Winlogbeat persists its state so that the beat
-  # can resume after shutdown or an outage. The default is .winlogbeat.yml
-  # in the directory in which it was started.
-  #registry_file: .winlogbeat.yml
-
-  # Diagnostic metrics that can retrieved through a web interface if a
-  # bindaddress value (host:port) is specified. The web address will be
-  # http:///debug/vars
-  #metrics:
-  #  bindaddress: 'localhost:8123'
+# Diagnostic metrics that can be retrieved through a web interface if a
+# bindaddress value (host:port) is specified. The web address will be
+# http://<bindaddress>/debug/vars
+#winlogbeat.metrics:
+#  bindaddress: 'localhost:8123'

 # event_logs specifies a list of event logs to monitor as well as any
 # accompanying options. The YAML data type of event_logs is a list of
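# NOTE: a sketch of how the diagnostic endpoint above would be used once
# bindaddress is uncommented (localhost:8123 is the example value from this
# file):
#
#winlogbeat.metrics:
#  bindaddress: 'localhost:8123'
#
# The counters are then readable at http://localhost:8123/debug/vars.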
diff --git a/winlogbeat/winlogbeat.yml b/winlogbeat/winlogbeat.yml
index 703a4fd3820..c1a4f9a3817 100644
--- a/winlogbeat/winlogbeat.yml
+++ b/winlogbeat/winlogbeat.yml
@@ -1,18 +1,15 @@
-################### Winlogbeat Configuration Example #########################
+###################### Winlogbeat Configuration Example #######################

-##################### Winlogbeat general configuration ######################
+# The registry file is where Winlogbeat persists its state so that the beat
+# can resume after shutdown or an outage. The default is .winlogbeat.yml
+# in the directory in which it was started.
+#winlogbeat.registry_file: .winlogbeat.yml

-#winlogbeat:
-  # The registry file is where Winlogbeat persists its state so that the beat
-  # can resume after shutdown or an outage. The default is .winlogbeat.yml
-  # in the directory in which it was started.
-  #registry_file: .winlogbeat.yml
-
-  # Diagnostic metrics that can retrieved through a web interface if a
-  # bindaddress value (host:port) is specified. The web address will be
-  # http:///debug/vars
-  #metrics:
-  #  bindaddress: 'localhost:8123'
+# Diagnostic metrics that can be retrieved through a web interface if a
+# bindaddress value (host:port) is specified. The web address will be
+# http://<bindaddress>/debug/vars
+#winlogbeat.metrics:
+#  bindaddress: 'localhost:8123'

 # event_logs specifies a list of event logs to monitor as well as any
 # accompanying options. The YAML data type of event_logs is a list of
@@ -29,16 +26,57 @@ winlogbeat.event_logs:
   - name: System

-###############################################################################
-############################# Libbeat Config ##################################
-# Base config file used by all other beats for using libbeat features

+#================================ General =====================================
+
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+# If this option is not defined, the hostname is used.
+#name:

-############################# Output ##########################################
+# The tags of the shipper are included in their own field with each
+# transaction published. Tags make it easy to group servers by different
+# logical properties.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output. Fields can be scalar values, arrays, dictionaries, or any nested
+# combination of these.
+#fields:
+#  env: staging
+
+# If this option is set to true, the custom fields are stored as top-level
+# fields in the output document instead of being grouped under a fields
+# sub-dictionary. Default is false.
+#fields_under_root: false
+
+# Uncomment the following if you want to ignore transactions created
+# by the server on which the shipper is installed. This option is useful
+# to remove duplicates if shippers are installed on multiple servers.
+#ignore_outgoing: true
+
+# How often (in seconds) shippers publish their IPs to the topology map.
+# The default is 10 seconds.
+#refresh_topology_freq: 10
+
+# Expiration time (in seconds) of the IPs published by a shipper to the topology map.
+# All the IPs will be deleted afterwards. Note that the value must be higher than
+# refresh_topology_freq. The default is 15 seconds.
+#topology_expire: 15
+
+# Internal queue size for single events in processing pipeline
+#queue_size: 1000
+
+# Sets the maximum number of CPUs that can be executing simultaneously. The
+# default is the number of logical CPUs available in the system.
+#max_procs:
+
+#================================ Outputs =====================================

 # Configure what outputs to use when sending the data collected by the beat.
 # Multiple outputs may be used.

-### Elasticsearch as output
+#-------------------------- Elasticsearch output ------------------------------
 output.elasticsearch:
   # Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (http and 9200) @@ -136,6 +174,7 @@ output.elasticsearch: #tls.max_version: 1.2 +#----------------------------- Logstash output -------------------------------- ### Logstash as output #output.logstash: # The Logstash hosts @@ -183,7 +222,7 @@ output.elasticsearch: #tls.curve_types: [] -### File as output +#------------------------------- File output ---------------------------------- #output.file: # Path to the directory where to save the generated files. The option is mandatory. #path: "/tmp/winlogbeat" @@ -201,70 +240,16 @@ output.elasticsearch: #number_of_files: 7 -### Console output +#----------------------------- Console output --------------------------------- #output.console: # Pretty print json event #pretty: false - -############################# General ######################################### - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. -#name: - -# The tags of the shipper are included in their own field with each -# transaction published. Tags make it easy to group servers by different -# logical properties. -#tags: ["service-X", "web-tier"] - -# Optional fields that you can specify to add additional information to the -# output. Fields can be scalar values, arrays, dictionaries, or any nested -# combination of these. -#fields: -# env: staging - -# If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields -# sub-dictionary. Default is false. -#fields_under_root: false - -# Uncomment the following if you want to ignore transactions created -# by the server on which the shipper is installed. This option is useful -# to remove duplicates if shippers are installed on multiple servers. -#ignore_outgoing: true - -# How often (in seconds) shippers are publishing their IPs to the topology map. -# The default is 10 seconds. -#refresh_topology_freq: 10 - -# Expiration time (in seconds) of the IPs published by a shipper to the topology map. -# All the IPs will be deleted afterwards. Note, that the value must be higher than -# refresh_topology_freq. The default is 15 seconds. -#topology_expire: 15 - -# Internal queue size for single events in processing pipeline -#queue_size: 1000 - -# Sets the maximum number of CPUs that can be executing simultaneously. The -# default is the number of logical CPUs available in the system. -#max_procs: - -############################# Logging ######################################### - +#================================ Logging ===================================== # There are three options for the log output: syslog, file, stderr. # Under Windows systems, the log files are per default sent to the file output, # under all other system per default to syslog. -#logging: - - # Send all logging output to syslog. On Windows default is false, otherwise - # default is true. - #to_syslog: true - # Write all logging output to files. Beats automatically rotate files if rotateeverybytes - # limit is reached. - #to_files: false # Enable debug output for selected components. To enable all selectors use ["*"] # Other available selectors are beat, publish, service @@ -275,10 +260,15 @@ output.elasticsearch: # Available log levels are: critical, error, warning, info, debug #level: error +# Send all logging output to syslog. The default is false. 
+#logging.to_syslog: true

-# To enable logging to files, to_files option has to be set to true
-# The directory where the log files will written to.
+# Logging to rotating files. Set logging.to_files to false to disable logging to
+# files.
+logging.to_files: true
 logging.files:
+  # Configure the path where the logs are written. The default is the logs directory
+  # under the home path (the binary location).
   #path: /var/log/mybeat

   # The name of the files where the logs are written to.
@@ -286,7 +276,7 @@ logging.files:

   # Configure log file size limit. If limit is reached, log file will be
   # automatically rotated
-  rotateeverybytes: 10485760 # = 10MB
+  #rotateeverybytes: 10485760 # = 10MB

   # Number of rotated log files to keep. Oldest files will be deleted first.
   #keepfiles: 7
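# Putting it together: a minimal, hypothetical winlogbeat.yml sketch built
# only from options shown in this file ("Application" and the explicit
# localhost:9200 host are illustrative assumptions, not upstream defaults):
#
#winlogbeat.event_logs:
#  - name: Application
#  - name: System
#
#output.elasticsearch:
#  hosts: ["localhost:9200"]
#
#logging.to_files: true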