diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc
index 28c609cfb65..01ae4b91f99 100644
--- a/CHANGELOG.asciidoc
+++ b/CHANGELOG.asciidoc
@@ -26,7 +26,6 @@ https://github.com/elastic/beats/compare/v5.0.0-alpha4...master[Check the HEAD d

 *Filebeat*

 - Stop following symlink. Symlinks are now ignored: {pull}1686[1686]
-- Deprecate force_close_files option and replace it with close_removed and close_renamed {issue}1600[1600]

 *Winlogbeat*

@@ -85,6 +84,9 @@ https://github.com/elastic/beats/compare/v5.0.0-alpha4...master[Check the HEAD d

 *Topbeat*

 *Filebeat*
+- Deprecate close_older option and replace it with close_inactive {issue}2051[2051]
+- Deprecate force_close_files option and replace it with close_removed and close_renamed {issue}1600[1600]
+

 *Winlogbeat*

diff --git a/filebeat/docs/reference/configuration/filebeat-options.asciidoc b/filebeat/docs/reference/configuration/filebeat-options.asciidoc
index c99918a3137..e00b891d844 100644
--- a/filebeat/docs/reference/configuration/filebeat-options.asciidoc
+++ b/filebeat/docs/reference/configuration/filebeat-options.asciidoc
@@ -180,28 +180,28 @@ For comparison, `ignore_older` relies on the modification time of the file. In c
 `ignore_older` can be especially useful if you keep log files for a long time and you start filebeat, but only want to send the newest files to elasticsearch and the old files from the last week, but not all files.

-To remove the state from the registry file for files which were harvested before, the `clean_idle` configuration option has to be used.
+To remove the state of previously harvested files from the registry file, use the `clean_inactive` configuration option.

-Requirement: ignore_older > close_idle
+Requirement: ignore_older > close_inactive

-Before a file can be ignored by the prospector, it must be closed. To ensure a file is not harvested anymore when it is ignored, ignore_older must be set to a longer duration then `close_idle`. It can happen, that a file is still harvested but already falls under `ignore_older` as the harvester didn't finish yet. The harvester will finish reading and close it after `close_idle` is reached.
+Before a file can be ignored by the prospector, it must be closed. To ensure a file is no longer harvested once it is ignored, ignore_older must be set to a longer duration than `close_inactive`. It can happen that a file already falls under `ignore_older` while it is still being harvested because the harvester has not finished yet. In that case the harvester finishes reading and closes the file after `close_inactive` is reached.

 [[close-options]]
 ===== close_*

 All `close_*` configuration options are used to close the harvester after a certain criteria or time. Closing the harvester means closing the file handler. In case a file is updated again after the harvester is closed, it will be picked up again after <>. It is important to understand, in case the file was moved away or deleted during this period, filebeat will not be able to pick up the file again and any data that the harvester didn't read so far is lost.

-[[close-idle]]
-===== close_idle
+[[close-inactive]]
+===== close_inactive

-After a file was not harvested for the duration of `close_idle`, the file handle will be closed. The counter for the defined period starts when the last log line was read by the harvester, it is not based on the modification time of the file. In case the closed file changes again, a new harvester is started again, latest after `scan_frequency`.
+After a file has not been harvested for the duration of `close_inactive`, the file handle is closed. The counter for the defined period starts when the last log line was read by the harvester; it is not based on the modification time of the file. If the closed file changes again, a new harvester is started, at the latest after `scan_frequency`.

-It is recommended to set `close_idle` to a value that is larger then the least frequent updates to your log file. In case your log file gets updated every few seconds, you can safely set it to `1m`. If there are log files with very different update rates, multiple prospector configurations with different values can be used.
+It is recommended to set `close_inactive` to a value that is larger than the longest interval between updates to your log file. If your log file gets updated every few seconds, you can safely set it to `1m`. If there are log files with very different update rates, multiple prospector configurations with different values can be used.

-Setting `close_idle` to a lower value means file handles are closed faster but has the side affect that new log lines are not sent in near real time in case the harvester was closed.
+Setting `close_inactive` to a lower value means file handles are closed faster, but has the side effect that new log lines are not sent in near real time while the harvester is closed.

-The timestamp for closing a file does not depend on the modification time of the file but an internal timestamp that is update when the file was last harvested. If `close_idle` is set to 5 minutes, the countdown for the 5 minutes starts the last time the harvester read a line from the file.
+The timestamp for closing a file does not depend on the modification time of the file but on an internal timestamp that is updated when the file was last harvested. If `close_inactive` is set to 5 minutes, the countdown for the 5 minutes starts from the last time the harvester read a line from the file.

 You can use time strings like 2h (2 hours) and 5m (5 minutes). The default is 1h.

@@ -218,7 +218,7 @@ WINDOWS: In case under windows your log rotation system shows errors because it

 WARNING: Only use this options if you understand the potential side affects with potential data loss.

-Close removed can be used to close a harvester directly when a file is removed. Normally a file should only be removed after it already falls under `close_idle`. In case files are removed early, without this option filebeat keeps the file open to make sure finishing is completed. In case the file handle should be released immediately after removal, this option can be used.
+Close removed can be used to close a harvester directly when a file is removed. Normally a file should only be removed after it already falls under `close_inactive`. If files are removed earlier, filebeat keeps the file open without this option to make sure reading is completed. If the file handle should be released immediately after removal, this option can be used.

 WINDOWS: In case under windows your log rotation system shows error because it can't rotated the files, this is the option to enabled.

@@ -241,17 +241,17 @@ Close timeout gives every harvester a predefined lifetime. Independent of the lo

 The `clean_*` variables are used to clean up the state entries. This helps to reduce the size of the registry file and can prevent a potential <>. These options are disabled by default as wrong settings can lead to data duplicatin as complete log files are sent again.

-===== clean_idle
+===== clean_inactive

 WARNING: Only use this options if you understand the potential side affects with potential data loss.

-`clean_idle` removes the state of the file after the given period. The state for files can only be removed if the file is already ignored by filebeat, means it's falling under `ignore_older`. The requirement for clean idle is `clean_idle > ignore_older + scan_frequency` to make sure no states are removed when a file is still harvested. Otherwise it could lead to resending the full content constantly as clean_idle removes state for files which are still detected by the prospector. In case a file is updated or appears again, the file is read from the beginning.
+`clean_inactive` removes the state of a file after the given period. The state of a file can only be removed if the file is already ignored by filebeat, meaning it falls under `ignore_older`. The requirement for `clean_inactive` is `clean_inactive > ignore_older + scan_frequency` to make sure no states are removed while a file is still being harvested. Otherwise the full content could be resent constantly, because `clean_inactive` would remove state for files which are still detected by the prospector. If a file is updated or appears again, the file is read from the beginning.

-The `clean_idle` configuration option is useful to reduce the size of the registry file, especially if a large amount of new files are generated every day.
+The `clean_inactive` configuration option is useful to reduce the size of the registry file, especially if a large number of new files is generated every day.

 In addition this config option is useful to prevent the <>. If a file is deleted, the inode can be reused by a newly created file. If the inode is the same, filebeat assumes to know the file and continues at the old position. As this issues gets more probable over time, it is good to cleanup the old states to make sure filebeat does not assume it already knows the file.

-NOTE: Every time a file is renamed, the file state will be updated and the counter for `clean_idle` will start at 0 again.
+NOTE: Every time a file is renamed, the file state will be updated and the counter for `clean_inactive` will start at 0 again.

 ===== clean_removed

@@ -270,7 +270,7 @@ directory is scanned for files using the frequency specified by
 `scan_frequency`. Specify 1s to scan the directory as frequently as possible without causing Filebeat to scan too frequently. We do not recommend to set this value `<1s`.

-If you require log lines to be sent in near real time do not use a very low `scan_frequency` but adjust `close_idle` so the file handler stays open and constantly polls your files.
+If you require log lines to be sent in near real time, do not use a very low `scan_frequency`; instead adjust `close_inactive` so the file handler stays open and constantly polls your files.

 The default setting is 10s.
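Taken together, the options documented above impose an ordering on the timers: `close_inactive` must be smaller than `ignore_older`, and `clean_inactive` must be larger than `ignore_older` plus `scan_frequency`. A minimal prospector sketch that satisfies these constraints; the path and durations are illustrative, not defaults:

[source,yaml]
----
filebeat.prospectors:
- input_type: log
  paths:
    - /var/log/app/*.log     # illustrative path
  scan_frequency: 10s        # how often the paths are checked for new files
  close_inactive: 5m         # close the handle 5m after the last line was read
  ignore_older: 1h           # must be larger than close_inactive
  clean_inactive: 2h         # must be larger than ignore_older + scan_frequency
----

With these values a file that stops being written to is closed after 5 minutes, ignored after an hour, and its registry entry is dropped after two hours.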
diff --git a/filebeat/docs/troubleshooting.asciidoc b/filebeat/docs/troubleshooting.asciidoc
index e355ad4be57..1e43dcd5344 100644
--- a/filebeat/docs/troubleshooting.asciidoc
+++ b/filebeat/docs/troubleshooting.asciidoc
@@ -15,7 +15,7 @@ include::../../libbeat/docs/getting-help.asciidoc[]

 == Reduce open file handlers

-Filebeat keeps the file handler open in case it reaches the end of a file to read new log lines in near real time. If filebeat is harvesting a large number of files, the number of open files can be become an issue. In most environments, the number of files which are actively updated is low. The configuration `close_idle` should be set accordingly to close files which are not active any more.
+Filebeat keeps the file handler open when it reaches the end of a file so it can read new log lines in near real time. If filebeat is harvesting a large number of files, the number of open files can become an issue. In most environments, the number of files which are actively updated is low. The `close_inactive` configuration option should be set accordingly to close files which are no longer active.

 There are 4 more configuration options which can be used to close file handlers, but all of them should be used carefully as they can side affects. The options are:

@@ -32,16 +32,16 @@ Before using any of these variables, make sure to study the documentation on eac

 [[reduce-registry-size]]
 == Reduce Registry File Size

-Filebeat keeps all states of the files and persists the states on disk in the `registry_file`. The states are used to continue file reading at a previous position in case filebeat is restarted. In case every day a large amount of new files is constantly produced, the registry file grows over time. To reduce the size of the registry file, there are two configuration variables: `clean_removed` and `clean_idle`.
+Filebeat keeps the state of every file and persists the states on disk in the `registry_file`. The states are used to continue file reading at a previous position when filebeat is restarted. If a large number of new files is produced every day, the registry file grows over time. To reduce the size of the registry file, there are two configuration options: `clean_removed` and `clean_inactive`.

-In case old files are not touched anymore and fall under `ignore_older`, it is recommended to use `clean_idle`. If on the other size old files get removed from disk `clean_removed` can be used.
+If old files are not touched anymore and fall under `ignore_older`, it is recommended to use `clean_inactive`. If, on the other hand, old files get removed from disk, `clean_removed` can be used.

 [[inode-reuse-issue]]
 == Inode Reuse Issue

 Filebeat uses under linux inode and device to identify files. In case a file is removed from disk, the inode can again be assigned to a new file. In the case of file rotation where and old file is removed and a new one is directly created afterwards, it can happen that the new files has the exact same inode. In this case, Filebeat assumes that the new file is the same as the old and tries to continue reading at the old position which is not correct.

-By default states are never removed from the registry file. In case of inode reuse issue it is recommended to use the `clean_*` options, especially `clean_idle`. In case your files get rotated every 24 hours and the rotated files rotated files are not updated anymore, `ignore_older` could be set to 48 hours and `clean_idle` 72 hours.
+By default states are never removed from the registry file. To counter the inode reuse issue it is recommended to use the `clean_*` options, especially `clean_inactive`. If your files get rotated every 24 hours and the rotated files are not updated anymore, `ignore_older` could be set to 48 hours and `clean_inactive` to 72 hours.

 `clean_removed` can be used for files that are removed from disk. Be aware that `clean_removed` also applies if during one scan a file cannot be found anymore. In case the file shows up at a later stage again, it will be sent again from scratch.
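The 24-hour rotation scenario described above maps onto a prospector configuration along these lines; it is a sketch with an assumed log path, not a recommendation for every setup:

[source,yaml]
----
filebeat.prospectors:
- input_type: log
  paths:
    - /var/log/app/app.log*  # assumed rotation naming scheme
  close_inactive: 10m        # release handles for files that stopped updating
  ignore_older: 48h          # rotated files are no longer written after 24h
  clean_inactive: 72h        # drop registry state, guarding against inode reuse
  clean_removed: true        # also drop state for files deleted from disk
----

Enabling `clean_removed` is only needed if rotated files are eventually deleted from disk rather than kept around.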
diff --git a/filebeat/etc/beat.full.yml b/filebeat/etc/beat.full.yml
index 4de3206e2ea..2d88ba7ee65 100644
--- a/filebeat/etc/beat.full.yml
+++ b/filebeat/etc/beat.full.yml
@@ -163,10 +163,10 @@ filebeat.prospectors:

 ### Harvester closing options

- # Close idle closes the file handler after the predefined period.
+ # Close inactive closes the file handler after the predefined period.
 # The period starts when the last line of the file was, not the file ModTime.
 # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
- #close_idle: 1h
+ #close_inactive: 1h

 # Close renamed closes a file handler when the file is renamed or rotated.
 # Note: Potential data loss. Make sure to read and understand the docs for this option.
@@ -191,9 +191,9 @@ filebeat.prospectors:

 ### State options

- # Files for the modification data is older then clean_older the state from the registry is removed
+ # If the modification time of a file is older than clean_inactive, the state is removed from the registry
 # By default this is disabled.
- #clean_idle: 0
+ #clean_inactive: 0

 # Removes the state for file which cannot be found on disk anymore immediately
 #clean_removed: false
diff --git a/filebeat/filebeat.full.yml b/filebeat/filebeat.full.yml
index 9f8b91bde14..920cfcfd159 100644
--- a/filebeat/filebeat.full.yml
+++ b/filebeat/filebeat.full.yml
@@ -163,10 +163,10 @@ filebeat.prospectors:

 ### Harvester closing options

- # Close idle closes the file handler after the predefined period.
+ # Close inactive closes the file handler after the predefined period.
 # The period starts when the last line of the file was, not the file ModTime.
 # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
- #close_idle: 1h
+ #close_inactive: 1h

 # Close renamed closes a file handler when the file is renamed or rotated.
 # Note: Potential data loss. Make sure to read and understand the docs for this option.
@@ -191,9 +191,9 @@ filebeat.prospectors:

 ### State options

- # Files for the modification data is older then clean_older the state from the registry is removed
+ # If the modification time of a file is older than clean_inactive, the state is removed from the registry
 # By default this is disabled.
- #clean_idle: 0
+ #clean_inactive: 0

 # Removes the state for file which cannot be found on disk anymore immediately
 #clean_removed: false
@@ -273,8 +273,8 @@ filebeat.prospectors:

 #================================ Processors =====================================

-# Processors are used to reduce the number of fields in the exported event or to
-# enhance the event with external meta data. This section defines a list of processors
+# Processors are used to reduce the number of fields in the exported event or to
+# enhance the event with external meta data. This section defines a list of processors
 # that are applied one by one and the first one receives the initial event:
 #
 # event -> filter1 -> event1 -> filter2 ->event2 ...
@@ -380,7 +380,7 @@ output.elasticsearch:
 #template.overwrite: false

 # If set to true, filebeat checks the Elasticsearch version at connect time, and if it
- # is 2.x, it loads the file specified by the template.versions.2x.path setting. The
+ # is 2.x, it loads the file specified by the template.versions.2x.path setting. The
 # default is true.
#template.versions.2x.enabled: true diff --git a/filebeat/harvester/config.go b/filebeat/harvester/config.go index 703de8f9005..59658830fca 100644 --- a/filebeat/harvester/config.go +++ b/filebeat/harvester/config.go @@ -22,7 +22,7 @@ var ( Backoff: 1 * time.Second, BackoffFactor: 2, MaxBackoff: 10 * time.Second, - CloseOlder: 1 * time.Hour, + CloseInactive: 1 * time.Hour, MaxBytes: 10 * humanize.MiByte, CloseRemoved: false, CloseRenamed: false, @@ -41,6 +41,7 @@ type harvesterConfig struct { Backoff time.Duration `config:"backoff" validate:"min=0,nonzero"` BackoffFactor int `config:"backoff_factor" validate:"min=1"` MaxBackoff time.Duration `config:"max_backoff" validate:"min=0,nonzero"` + CloseInactive time.Duration `config:"close_inactive"` CloseOlder time.Duration `config:"close_older"` CloseRemoved bool `config:"close_removed"` CloseRenamed bool `config:"close_renamed"` @@ -62,6 +63,12 @@ func (config *harvesterConfig) Validate() error { logp.Warn("DEPRECATED: force_close_files was set to true. Use close_removed + close_rename") } + // DEPRECATED: remove in 6.0 + if config.CloseOlder > 0 { + config.CloseInactive = config.CloseOlder + logp.Warn("DEPRECATED: close_older is deprecated. Use close_inactive") + } + // Check input type if _, ok := cfg.ValidInputType[config.InputType]; !ok { return fmt.Errorf("Invalid input type: %v", config.InputType) diff --git a/filebeat/harvester/config_test.go b/filebeat/harvester/config_test.go index b64e14b8e30..430501db298 100644 --- a/filebeat/harvester/config_test.go +++ b/filebeat/harvester/config_test.go @@ -2,6 +2,7 @@ package harvester import ( "testing" + "time" "github.com/stretchr/testify/assert" ) @@ -20,3 +21,15 @@ func TestForceCloseFiles(t *testing.T) { assert.True(t, config.CloseRemoved) assert.True(t, config.CloseRenamed) } + +func TestCloseOlder(t *testing.T) { + + config := defaultConfig + assert.Equal(t, config.CloseOlder, 0*time.Hour) + assert.Equal(t, config.CloseInactive, defaultConfig.CloseInactive) + + config.CloseOlder = 5 * time.Hour + config.Validate() + + assert.Equal(t, config.CloseInactive, 5*time.Hour) +} diff --git a/filebeat/harvester/log.go b/filebeat/harvester/log.go index 98a0e2d838d..0bb6ec7d235 100644 --- a/filebeat/harvester/log.go +++ b/filebeat/harvester/log.go @@ -277,7 +277,7 @@ func (h *Harvester) newLogFileReaderConfig() reader.LogFileReaderConfig { return reader.LogFileReaderConfig{ CloseRemoved: h.config.CloseRemoved, CloseRenamed: h.config.CloseRenamed, - CloseOlder: h.config.CloseOlder, + CloseInactive: h.config.CloseInactive, CloseEOF: h.config.CloseEOF, Backoff: h.config.Backoff, MaxBackoff: h.config.MaxBackoff, diff --git a/filebeat/harvester/log_test.go b/filebeat/harvester/log_test.go index 52cf4060257..5f46910977b 100644 --- a/filebeat/harvester/log_test.go +++ b/filebeat/harvester/log_test.go @@ -59,7 +59,7 @@ func TestReadLine(t *testing.T) { h := Harvester{ config: harvesterConfig{ - CloseOlder: 500 * time.Millisecond, + CloseInactive: 500 * time.Millisecond, Backoff: 100 * time.Millisecond, MaxBackoff: 1 * time.Second, BackoffFactor: 2, diff --git a/filebeat/harvester/reader/log.go b/filebeat/harvester/reader/log.go index d2db10dc5e3..1fc19c119c1 100644 --- a/filebeat/harvester/reader/log.go +++ b/filebeat/harvester/reader/log.go @@ -32,7 +32,7 @@ type LogFileReaderConfig struct { MaxBackoff time.Duration BackoffFactor int CloseEOF bool - CloseOlder time.Duration + CloseInactive time.Duration CloseRenamed bool CloseRemoved bool } @@ -129,9 +129,9 @@ func (r *logFileReader) errorChecks(err 
error) error { return ErrFileTruncate } - // Check file wasn't read for longer then CloseOlder + // Check file wasn't read for longer then CloseInactive age := time.Since(r.lastTimeRead) - if age > r.config.CloseOlder { + if age > r.config.CloseInactive { return ErrInactive } diff --git a/filebeat/prospector/config.go b/filebeat/prospector/config.go index 6933491166e..a38a2077bb8 100644 --- a/filebeat/prospector/config.go +++ b/filebeat/prospector/config.go @@ -13,7 +13,7 @@ var ( IgnoreOlder: 0, ScanFrequency: 10 * time.Second, InputType: cfg.DefaultInputType, - CleanOlder: 0, + CleanInactive: 0, CleanRemoved: false, } ) @@ -24,7 +24,7 @@ type prospectorConfig struct { Paths []string `config:"paths"` ScanFrequency time.Duration `config:"scan_frequency"` InputType string `config:"input_type"` - CleanOlder time.Duration `config:"clean_older" validate:"min=0"` + CleanInactive time.Duration `config:"clean_inactive" validate:"min=0"` CleanRemoved bool `config:"clean_removed"` } diff --git a/filebeat/prospector/prospector.go b/filebeat/prospector/prospector.go index 53190479273..2fc95db77a5 100644 --- a/filebeat/prospector/prospector.go +++ b/filebeat/prospector/prospector.go @@ -106,8 +106,8 @@ func (p *Prospector) Run() { return case event := <-p.harvesterChan: // Add ttl if cleanOlder is enabled - if p.config.CleanOlder > 0 { - event.State.TTL = p.config.CleanOlder + if p.config.CleanInactive > 0 { + event.State.TTL = p.config.CleanInactive } select { case <-p.done: diff --git a/filebeat/prospector/prospector_log.go b/filebeat/prospector/prospector_log.go index d6ddae5dfbf..5496f37e891 100644 --- a/filebeat/prospector/prospector_log.go +++ b/filebeat/prospector/prospector_log.go @@ -52,7 +52,7 @@ func (p *ProspectorLog) Run() { p.scan() // It is important that a first scan is run before cleanup to make sure all new states are read first - if p.config.CleanOlder > 0 { + if p.config.CleanInactive > 0 { p.Prospector.states.Cleanup() logp.Debug("prospector", "Prospector states cleaned up.") } diff --git a/filebeat/tests/files/config.yml b/filebeat/tests/files/config.yml index 14f81f1877b..0f998101d45 100644 --- a/filebeat/tests/files/config.yml +++ b/filebeat/tests/files/config.yml @@ -13,7 +13,7 @@ filebeat: review: 1 type: log ignore_older: 0 - close_older: 1h + close_inactive: 1h scan_frequency: 10s harvester_buffer_size: 5000 tail_files: false diff --git a/filebeat/tests/load/filebeat.yml b/filebeat/tests/load/filebeat.yml index 2df30ac223b..65e640ca280 100644 --- a/filebeat/tests/load/filebeat.yml +++ b/filebeat/tests/load/filebeat.yml @@ -8,7 +8,7 @@ filebeat: # level: debug # review: 1 ignore_older: 0 - close_older: 1h + close_inactive: 1h scan_frequency: 0s harvester_buffer_size: 1000000 diff --git a/filebeat/tests/system/config/filebeat.yml.j2 b/filebeat/tests/system/config/filebeat.yml.j2 index 97de7c0769e..a80640eb029 100644 --- a/filebeat/tests/system/config/filebeat.yml.j2 +++ b/filebeat/tests/system/config/filebeat.yml.j2 @@ -12,7 +12,7 @@ filebeat.prospectors: # Type of the files. 
Annotated in every documented scan_frequency: {{scan_frequency | default("0.1s") }} ignore_older: {{ignore_older}} - close_older: {{close_older}} + close_inactive: {{close_inactive}} harvester_buffer_size: encoding: {{encoding | default("utf-8") }} tail_files: {{tail_files}} @@ -23,7 +23,7 @@ filebeat.prospectors: close_renamed: {{close_renamed}} close_eof: {{close_eof}} force_close_files: {{force_close_files}} - clean_older: {{clean_older}} + clean_inactive: {{clean_inactive}} clean_removed: {{clean_removed}} {% if fields %} diff --git a/filebeat/tests/system/test_prospector.py b/filebeat/tests/system/test_prospector.py index f7bf3e26d5d..ce137f6b235 100644 --- a/filebeat/tests/system/test_prospector.py +++ b/filebeat/tests/system/test_prospector.py @@ -109,11 +109,11 @@ def test_stdin(self): objs = self.read_output() assert len(objs) == iterations1 + iterations2 - def test_rotating_close_older_larger_write_rate(self): + def test_rotating_close_inactive_larger_write_rate(self): self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/*", ignore_older="10s", - close_older="1s", + close_inactive="1s", scan_frequency="0.1s", ) @@ -172,11 +172,11 @@ def test_exclude_files(self): assert 1 == len(output) assert output[0]["message"] == "line in log file" - def test_rotating_close_older_low_write_rate(self): + def test_rotating_close_inactive_low_write_rate(self): self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/*", ignore_older="10s", - close_older="1s", + close_inactive="1s", scan_frequency="0.1s", ) @@ -207,7 +207,7 @@ def test_rotating_close_older_low_write_rate(self): os.rename(testfile, testfile + ".1") open(testfile, 'w').close() - # wait for file to be closed due to close_older + # wait for file to be closed due to close_inactive self.wait_until( lambda: self.log_contains( "Stopping harvester, closing file: {}\n".format(os.path.abspath(testfile))), @@ -306,16 +306,16 @@ def test_files_added_late(self): filebeat.check_kill_and_wait() - def test_close_older(self): + def test_close_inactive(self): """ - Test that close_older closes the file but reading + Test that close_inactive closes the file but reading is picked up again after scan_frequency """ self.render_config_template( - path=os.path.abspath(self.working_dir) + "/log/*", - ignore_older="1h", - close_older="1s", - scan_frequency="0.1s", + path=os.path.abspath(self.working_dir) + "/log/*", + ignore_older="1h", + close_inactive="1s", + scan_frequency="0.1s", ) os.mkdir(self.working_dir + "/log/") @@ -341,7 +341,7 @@ def test_close_older(self): lambda: self.output_has(lines=lines), max_timeout=15) - # wait for file to be closed due to close_older + # wait for file to be closed due to close_inactive self.wait_until( lambda: self.log_contains( "Stopping harvester, closing file: {}\n".format(os.path.abspath(testfile))), @@ -359,15 +359,15 @@ def test_close_older(self): filebeat.check_kill_and_wait() - def test_close_older_file_removal(self): + def test_close_inactive_file_removal(self): """ - Test that close_older still applies also if the file to close was removed + Test that close_inactive still applies also if the file to close was removed """ self.render_config_template( - path=os.path.abspath(self.working_dir) + "/log/*", - ignore_older="1h", - close_older="3s", - scan_frequency="0.1s", + path=os.path.abspath(self.working_dir) + "/log/*", + ignore_older="1h", + close_inactive="3s", + scan_frequency="0.1s", ) os.mkdir(self.working_dir + "/log/") @@ -395,7 +395,7 @@ def 
test_close_older_file_removal(self): os.remove(testfile) - # wait for file to be closed due to close_older + # wait for file to be closed due to close_inactive self.wait_until( lambda: self.log_contains( "Stopping harvester, closing file: {}\n".format(os.path.abspath(testfile))), @@ -404,15 +404,15 @@ def test_close_older_file_removal(self): filebeat.check_kill_and_wait() - def test_close_older_file_rotation_and_removal(self): + def test_close_inactive_file_rotation_and_removal(self): """ - Test that close_older still applies also if the file to close was removed + Test that close_inactive still applies also if the file to close was removed """ self.render_config_template( - path=os.path.abspath(self.working_dir) + "/log/test.log", - ignore_older="1h", - close_older="3s", - scan_frequency="0.1s", + path=os.path.abspath(self.working_dir) + "/log/test.log", + ignore_older="1h", + close_inactive="3s", + scan_frequency="0.1s", ) os.mkdir(self.working_dir + "/log/") @@ -442,7 +442,7 @@ def test_close_older_file_rotation_and_removal(self): os.rename(testfile, renamed_file) os.remove(renamed_file) - # wait for file to be closed due to close_older + # wait for file to be closed due to close_inactive self.wait_until( lambda: self.log_contains( # Still checking for old file name as filename does not change in harvester @@ -452,16 +452,16 @@ def test_close_older_file_rotation_and_removal(self): filebeat.check_kill_and_wait() - def test_close_older_file_rotation_and_removal(self): + def test_close_inactive_file_rotation_and_removal(self): """ - Test that close_older still applies also if file was rotated, + Test that close_inactive still applies also if file was rotated, new file created, and rotated file removed. """ self.render_config_template( - path=os.path.abspath(self.working_dir) + "/log/test.log", - ignore_older="1h", - close_older="3s", - scan_frequency="0.1s", + path=os.path.abspath(self.working_dir) + "/log/test.log", + ignore_older="1h", + close_inactive="3s", + scan_frequency="0.1s", ) os.mkdir(self.working_dir + "/log/") diff --git a/filebeat/tests/system/test_registrar.py b/filebeat/tests/system/test_registrar.py index ad8511f0693..e9f777fffef 100644 --- a/filebeat/tests/system/test_registrar.py +++ b/filebeat/tests/system/test_registrar.py @@ -704,19 +704,18 @@ def test_migration_windows(self): assert len(data) == 2 - def test_clean_older(self): + def test_clean_inactive(self): """ - Checks that states are properly removed after clean_older + Checks that states are properly removed after clean_inactive """ self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/input*", - clean_older="4s", + clean_inactive="4s", ignore_older="2s", - close_older="0.2s", + close_inactive="0.2s", scan_frequency="0.1s" ) - os.mkdir(self.working_dir + "/log/") testfile1 = self.working_dir + "/log/input1" testfile2 = self.working_dir + "/log/input2"
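Because the harvester config only deprecates `close_older` rather than removing it (the `Validate()` shim above copies it into `CloseInactive` and logs a warning), an existing prospector section keeps working; migrating is a plain rename. A before/after sketch with an illustrative path:

[source,yaml]
----
# Deprecated spelling, still accepted, but logs
# "DEPRECATED: close_older is deprecated. Use close_inactive":
filebeat.prospectors:
- input_type: log
  paths: ["/var/log/app/*.log"]
  close_older: 1h

# Equivalent configuration using the new option name:
filebeat.prospectors:
- input_type: log
  paths: ["/var/log/app/*.log"]
  close_inactive: 1h
----

In this diff, `clean_older` gets no similar fallback in prospector/config.go, so configurations using it need the explicit rename to `clean_inactive`.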