Configuration files restyling
Little change content-wise, but some style changes:

* Use ###, ===, and --- headers to indicate hierarchy. This makes it
  easier to skim the config file. Idea stolen from elasticsearch.yml.
* Unindent filebeat prospectors and metricbeat modules by one level, as
  shown in the condensed sketch below. This is still valid YAML, and
  fewer leading spaces make it harder to get the whitespace wrong.
* Reorganized the logging config a bit.
* Moved the "general" libbeat section before the outputs.
* Other fairly minor changes to the Beats YAML files.

This is a follow-up to elastic#1544 and part of elastic#1417.
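
For illustration, here is roughly how the metricbeat modules section changes
(condensed from the diff further below; the "old style" indentation is assumed
to be the previous two-space level, which the rendered diff no longer shows):

# Old style: everything indented under metricbeat.modules
metricbeat.modules:

  # Apache Module
  - module: apache
    metricsets: ["status"]
    period: 1s

# New style: ### / === / --- headers, modules unindented by one level
#========================== Modules configuration ============================
metricbeat.modules:

#----------------------------- Apache Module ----------------------------------
- module: apache
  metricsets: ["status"]
  period: 1s

The logging section similarly moves to dotted top-level keys (condensed from
the libbeat diff below; the nesting under logging.files is assumed):

#================================ Logging =====================================
logging.to_files: true
logging.files:
  #rotateeverybytes: 10485760 # = 10MB
  #keepfiles: 7
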
Tudor Golubenco committed May 12, 2016
1 parent c49abfe commit e8b04f0
Showing 11 changed files with 881 additions and 941 deletions.
filebeat/etc/beat.yml: 363 changes (180 additions & 183 deletions)

Large diffs are not rendered by default.

filebeat/filebeat.yml: 484 changes (237 additions & 247 deletions)

Large diffs are not rendered by default.

libbeat/etc/libbeat.yml: 123 changes (58 additions & 65 deletions)
@@ -1,13 +1,54 @@
-###############################################################################
-############################# Libbeat Config ##################################
-# Base config file used by all other beats for using libbeat features
+#================================ General =====================================

-############################# Output ##########################################

+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+# If this options is not defined, the hostname is used.
+#name:

+# The tags of the shipper are included in their own field with each
+# transaction published. Tags make it easy to group servers by different
+# logical properties.
+#tags: ["service-X", "web-tier"]

+# Optional fields that you can specify to add additional information to the
+# output. Fields can be scalar values, arrays, dictionaries, or any nested
+# combination of these.
+#fields:
+# env: staging

+# If this option is set to true, the custom fields are stored as top-level
+# fields in the output document instead of being grouped under a fields
+# sub-dictionary. Default is false.
+#fields_under_root: false

+# Uncomment the following if you want to ignore transactions created
+# by the server on which the shipper is installed. This option is useful
+# to remove duplicates if shippers are installed on multiple servers.
+#ignore_outgoing: true

+# How often (in seconds) shippers are publishing their IPs to the topology map.
+# The default is 10 seconds.
+#refresh_topology_freq: 10

+# Expiration time (in seconds) of the IPs published by a shipper to the topology map.
+# All the IPs will be deleted afterwards. Note, that the value must be higher than
+# refresh_topology_freq. The default is 15 seconds.
+#topology_expire: 15

+# Internal queue size for single events in processing pipeline
+#queue_size: 1000

+# Sets the maximum number of CPUs that can be executing simultaneously. The
+# default is the number of logical CPUs available in the system.
+#max_procs:

+#================================ Outputs =====================================

+# Configure what outputs to use when sending the data collected by the beat.
+# Multiple outputs may be used.

-### Elasticsearch as output
+#-------------------------- Elasticsearch output ------------------------------
output.elasticsearch:
# Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (http and 9200)
@@ -105,6 +146,7 @@ output.elasticsearch:
#tls.max_version: 1.2


+#----------------------------- Logstash output --------------------------------
### Logstash as output
#output.logstash:
# The Logstash hosts
@@ -152,7 +194,7 @@
#tls.curve_types: []


-### File as output
+#------------------------------- File output ----------------------------------
#output.file:
# Path to the directory where to save the generated files. The option is mandatory.
#path: "/tmp/beatname"
@@ -170,70 +212,16 @@ output.elasticsearch:
#number_of_files: 7


-### Console output
+#----------------------------- Console output ---------------------------------
#output.console:
# Pretty print json event
#pretty: false


-############################# General #########################################

-# The name of the shipper that publishes the network data. It can be used to group
-# all the transactions sent by a single shipper in the web interface.
-# If this options is not defined, the hostname is used.
-#name:

-# The tags of the shipper are included in their own field with each
-# transaction published. Tags make it easy to group servers by different
-# logical properties.
-#tags: ["service-X", "web-tier"]

-# Optional fields that you can specify to add additional information to the
-# output. Fields can be scalar values, arrays, dictionaries, or any nested
-# combination of these.
-#fields:
-# env: staging

-# If this option is set to true, the custom fields are stored as top-level
-# fields in the output document instead of being grouped under a fields
-# sub-dictionary. Default is false.
-#fields_under_root: false

-# Uncomment the following if you want to ignore transactions created
-# by the server on which the shipper is installed. This option is useful
-# to remove duplicates if shippers are installed on multiple servers.
-#ignore_outgoing: true

-# How often (in seconds) shippers are publishing their IPs to the topology map.
-# The default is 10 seconds.
-#refresh_topology_freq: 10

-# Expiration time (in seconds) of the IPs published by a shipper to the topology map.
-# All the IPs will be deleted afterwards. Note, that the value must be higher than
-# refresh_topology_freq. The default is 15 seconds.
-#topology_expire: 15

-# Internal queue size for single events in processing pipeline
-#queue_size: 1000

-# Sets the maximum number of CPUs that can be executing simultaneously. The
-# default is the number of logical CPUs available in the system.
-#max_procs:

-############################# Logging #########################################

+#================================ Logging =====================================
# There are three options for the log output: syslog, file, stderr.
# Under Windows systems, the log files are per default sent to the file output,
# under all other system per default to syslog.
-#logging:

-# Send all logging output to syslog. On Windows default is false, otherwise
-# default is true.
-#to_syslog: true

-# Write all logging output to files. Beats automatically rotate files if rotateeverybytes
-# limit is reached.
-#to_files: false

# Enable debug output for selected components. To enable all selectors use ["*"]
# Other available selectors are beat, publish, service
@@ -244,18 +232,23 @@ output.elasticsearch:
# Available log levels are: critical, error, warning, info, debug
#level: error

+# Send all logging output to syslog. The default is false.
+#logging.to_syslog: true

-# To enable logging to files, to_files option has to be set to true
-# The directory where the log files will written to.
+# Logging to rotating files files. Set logging.to_files to false to disable logging to
+# files.
+logging.to_files: true
+logging.files:
+# Configure the path where the logs are written. The default is the logs directory
+# under the home path (the binary location).
#path: /var/log/mybeat

# The name of the files where the logs are written to.
#name: mybeat

# Configure log file size limit. If limit is reached, log file will be
# automatically rotated
-rotateeverybytes: 10485760 # = 10MB
+#rotateeverybytes: 10485760 # = 10MB

# Number of rotated log files to keep. Oldest files will be deleted first.
#keepfiles: 7
metricbeat/etc/beat.yml: 128 changes (65 additions & 63 deletions)
@@ -1,85 +1,87 @@
-metricbeat.modules:
+###################### Metricbeat Configuration Example #######################

-  # Apache Module
-  - module: apache
-    metricsets: ["status"]
-    enabled: true
-    period: 1s
+#========================== Modules configuration ============================
+metricbeat.modules:

-    # Apache hosts
-    hosts: ["http://127.0.0.1/"]
+#----------------------------- Apache Module ----------------------------------
+- module: apache
+  metricsets: ["status"]
+  enabled: true
+  period: 1s

-    # Path to server status. Default server-status
-    #server_status_path: "server-status"
+  # Apache hosts
+  hosts: ["http://127.0.0.1/"]

-    # Username of hosts. Empty by default
-    #username: test
+  # Path to server status. Default server-status
+  #server_status_path: "server-status"

-    # Password of hosts. Empty by default
-    #password: test123
+  # Username of hosts. Empty by default
+  #username: test

-  # MySQL Module
-  - module: mysql
-    metricsets: ["status"]
-    enabled: true
-    period: 2s
+  # Password of hosts. Empty by default
+  #password: test123

-    # Host DSN should be defined as "tcp(127.0.0.1:3306)/"
-    # The username and password can either be set in the DSN or for all hosts in username and password config option
-    hosts: ["root@tcp(127.0.0.1:3306)/"]
+#------------------------------- Mysql Module ---------------------------------
+- module: mysql
+  metricsets: ["status"]
+  enabled: true
+  period: 2s

-    # Username of hosts. Empty by default
-    #username: root
+  # Host DSN should be defined as "tcp(127.0.0.1:3306)/"
+  # The username and password can either be set in the DSN or for all hosts in username and password config option
+  hosts: ["root@tcp(127.0.0.1:3306)/"]

-    # Password of hosts. Empty by default
-    #password: test
+  # Username of hosts. Empty by default
+  #username: root

-  # Redis Module
-  - module: redis
-    metricsets: ["info"]
-    enabled: true
-    period: 1s
+  # Password of hosts. Empty by default
+  #password: test

-    # Redis hosts
-    hosts: ["127.0.0.1:6379"]
+#------------------------------- Redis Module ---------------------------------
+- module: redis
+  metricsets: ["info"]
+  enabled: true
+  period: 1s

-    # Enabled defines if the module is enabled. Default: true
-    #enabled: true
+  # Redis hosts
+  hosts: ["127.0.0.1:6379"]

-    # Timeout after which time a metricset should return an error
-    # Timeout is by default defined as period, as a fetch of a metricset
-    # should never take longer then period, as otherwise calls can pile up.
-    #timeout: 1s
+  # Enabled defines if the module is enabled. Default: true
+  #enabled: true

-    # Optional fields to be added to each event
-    #fields:
-    # datacenter: west
+  # Timeout after which time a metricset should return an error
+  # Timeout is by default defined as period, as a fetch of a metricset
+  # should never take longer then period, as otherwise calls can pile up.
+  #timeout: 1s

-    # Network type to be used for redis connection. Default: tcp
-    #network: tcp
+  # Optional fields to be added to each event
+  #fields:
+  # datacenter: west

-    # Max number of concurrent connections. Default: 10
-    #maxconn: 10
+  # Network type to be used for redis connection. Default: tcp
+  #network: tcp

-    # Filters can be used to reduce the number of fields sent.
-    #filters:
-    # - include_fields:
-    # fields: ["stats"]
+  # Max number of concurrent connections. Default: 10
+  #maxconn: 10

-    # Redis AUTH password. Empty by default.
-    #password: foobared
+  # Filters can be used to reduce the number of fields sent.
+  #filters:
+  # - include_fields:
+  # fields: ["stats"]

-  # System module
-  - module: system
-    metricsets: ["cpu", "cores", "filesystem", "fsstats", "memory", "process"]
-    enabled: true
-    period: 2s
+  # Redis AUTH password. Empty by default.
+  #password: foobared

-  # Zookeeper module
-  - module: zookeeper
-    metricsets: ["mntr"]
-    enabled: true
-    period: 5s
-    hosts: ["localhost:2181"]
+#------------------------------ System Module ---------------------------------
+- module: system
+  metricsets: ["cpu", "cores", "filesystem", "fsstats", "memory", "process"]
+  enabled: true
+  period: 2s

+#---------------------------- Zookeeper Module --------------------------------
+- module: zookeeper
+  metricsets: ["mntr"]
+  enabled: true
+  period: 5s
+  hosts: ["localhost:2181"]
