// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package beater

import (
	"flag"
	"fmt"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/elastic/beats/v7/filebeat/backup"
	"github.com/elastic/beats/v7/filebeat/channel"
	cfg "github.com/elastic/beats/v7/filebeat/config"
	"github.com/elastic/beats/v7/filebeat/fileset"
	_ "github.com/elastic/beats/v7/filebeat/include"
	"github.com/elastic/beats/v7/filebeat/input"
	"github.com/elastic/beats/v7/filebeat/input/filestream/takeover"
	v2 "github.com/elastic/beats/v7/filebeat/input/v2"
	"github.com/elastic/beats/v7/filebeat/input/v2/compat"
	"github.com/elastic/beats/v7/filebeat/registrar"
	"github.com/elastic/beats/v7/libbeat/autodiscover"
	"github.com/elastic/beats/v7/libbeat/beat"
	"github.com/elastic/beats/v7/libbeat/cfgfile"
	"github.com/elastic/beats/v7/libbeat/common/cfgwarn"
	"github.com/elastic/beats/v7/libbeat/esleg/eslegclient"
	"github.com/elastic/beats/v7/libbeat/management"
	"github.com/elastic/beats/v7/libbeat/monitoring/inputmon"
	"github.com/elastic/beats/v7/libbeat/outputs/elasticsearch"
	"github.com/elastic/beats/v7/libbeat/publisher/pipetool"
	"github.com/elastic/beats/v7/libbeat/statestore"
	conf "github.com/elastic/elastic-agent-libs/config"
	"github.com/elastic/elastic-agent-libs/logp"
	"github.com/elastic/elastic-agent-libs/monitoring"
	"github.com/elastic/elastic-agent-libs/paths"
	"github.com/elastic/go-concert/unison"

	// Add filebeat level processors
	_ "github.com/elastic/beats/v7/filebeat/processor/add_kubernetes_metadata"
	_ "github.com/elastic/beats/v7/libbeat/processors/decode_csv_fields"

	// include all filebeat specific autodiscover features
	_ "github.com/elastic/beats/v7/filebeat/autodiscover"
)

const pipelinesWarning = "Filebeat is unable to load the ingest pipelines for the configured" +
	" modules because the Elasticsearch output is not configured/enabled. If you have" +
	" already loaded the ingest pipelines or are using Logstash pipelines, you" +
	" can ignore this warning."
var once = flag.Bool("once", false, "Run filebeat only once until all harvesters reach EOF")

// Filebeat is a beater object. It contains all the objects needed to run the Beat.
type Filebeat struct {
	config         *cfg.Config
	moduleRegistry *fileset.ModuleRegistry
	pluginFactory  PluginFactory
	done           chan struct{}
	stopOnce       sync.Once // wraps the Stop() method
	pipeline       beat.PipelineConnector
}

// PluginFactory returns the list of v2 input plugins to register for this Beat.
type PluginFactory func(beat.Info, *logp.Logger, StateStore) []v2.Plugin

// StateStore provides access to the persistent state (registry) used by inputs.
type StateStore interface {
	Access() (*statestore.Store, error)
	CleanupInterval() time.Duration
}
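
// A minimal sketch of a PluginFactory, assuming a hypothetical helper
// newMyInputPlugin that builds a v2.Plugin from a logger and the state store
// (illustrative only, not part of this package):
//
//	func myPlugins(info beat.Info, log *logp.Logger, store StateStore) []v2.Plugin {
//		return []v2.Plugin{newMyInputPlugin(log, store)}
//	}
//
// The actual factory is supplied by the caller of New below.
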
// New creates a new Filebeat pointer instance.
func New(plugins PluginFactory) beat.Creator {
	return func(b *beat.Beat, rawConfig *conf.C) (beat.Beater, error) {
		return newBeater(b, plugins, rawConfig)
	}
}

func newBeater(b *beat.Beat, plugins PluginFactory, rawConfig *conf.C) (beat.Beater, error) {
	config := cfg.DefaultConfig
	if err := rawConfig.Unpack(&config); err != nil {
		return nil, fmt.Errorf("Error reading config file: %w", err)
	}

	if err := cfgwarn.CheckRemoved6xSettings(
		rawConfig,
		"prospectors",
		"config.prospectors",
		"registry_file",
		"registry_file_permissions",
		"registry_flush",
	); err != nil {
		return nil, err
	}

	enableAllFilesets, _ := b.BeatConfig.Bool("config.modules.enable_all_filesets", -1)
	forceEnableModuleFilesets, _ := b.BeatConfig.Bool("config.modules.force_enable_module_filesets", -1)
	filesetOverrides := fileset.FilesetOverrides{
		EnableAllFilesets:         enableAllFilesets,
		ForceEnableModuleFilesets: forceEnableModuleFilesets,
	}
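	// The two settings above are read from the beat-level configuration
	// namespace. With the default config layout that corresponds to something
	// like the following YAML (illustrative, not taken from this file):
	//
	//	filebeat.config.modules:
	//	  enable_all_filesets: true
	//	  force_enable_module_filesets: true
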
	moduleRegistry, err := fileset.NewModuleRegistry(config.Modules, b.Info, true, filesetOverrides)
	if err != nil {
		return nil, err
	}

	moduleInputs, err := moduleRegistry.GetInputConfigs()
	if err != nil {
		return nil, err
	}

	if err := config.FetchConfigs(); err != nil {
		return nil, err
	}

	if b.API != nil {
		if err = inputmon.AttachHandler(b.API.Router()); err != nil {
			return nil, fmt.Errorf("failed attach inputs api to monitoring endpoint server: %w", err)
		}
	}

	if b.Manager != nil {
		b.Manager.RegisterDiagnosticHook("input_metrics", "Metrics from active inputs.",
			"input_metrics.json", "application/json", func() []byte {
				data, err := inputmon.MetricSnapshotJSON()
				if err != nil {
					logp.L().Warnw("Failed to collect input metric snapshot for Agent diagnostics.", "error", err)
					return []byte(err.Error())
				}
				return data
			})
	}

	// Add inputs created by the modules
	config.Inputs = append(config.Inputs, moduleInputs...)

	enabledInputs := config.ListEnabledInputs()
	var haveEnabledInputs bool
	if len(enabledInputs) > 0 {
		haveEnabledInputs = true
	}

	if !config.ConfigInput.Enabled() && !config.ConfigModules.Enabled() && !haveEnabledInputs && config.Autodiscover == nil && !b.Manager.Enabled() {
		if !b.InSetupCmd {
			return nil, fmt.Errorf("no modules or inputs enabled and configuration reloading disabled. What files do you want me to watch?")
		}

		// in the `setup` command, log this only as a warning
		logp.Warn("Setup called, but no modules enabled.")
	}

	if *once && config.ConfigInput.Enabled() && config.ConfigModules.Enabled() {
		return nil, fmt.Errorf("input configs and -once cannot be used together")
	}

	if config.IsInputEnabled("stdin") && len(enabledInputs) > 1 {
		return nil, fmt.Errorf("stdin requires to be run in exclusive mode, configured inputs: %s", strings.Join(enabledInputs, ", "))
	}

	fb := &Filebeat{
		done:           make(chan struct{}),
		config:         &config,
		moduleRegistry: moduleRegistry,
		pluginFactory:  plugins,
	}

	err = fb.setupPipelineLoaderCallback(b)
	if err != nil {
		return nil, err
	}

	return fb, nil
}

// setupPipelineLoaderCallback sets the callback function for loading pipelines during setup.
func (fb *Filebeat) setupPipelineLoaderCallback(b *beat.Beat) error {
	if b.Config.Output.Name() != "elasticsearch" && !b.Manager.Enabled() {
		logp.Warn(pipelinesWarning)
		return nil
	}

	overwritePipelines := true
	b.OverwritePipelinesCallback = func(esConfig *conf.C) error {
		esClient, err := eslegclient.NewConnectedClient(esConfig, "Filebeat")
		if err != nil {
			return err
		}

		// When running the `setup` subcommand, configurations from the modules.d
		// directory have to be loaded using cfg.Reloader. Otherwise those
		// configurations are skipped.
		pipelineLoaderFactory := newPipelineLoaderFactory(b.Config.Output.Config())
		enableAllFilesets, _ := b.BeatConfig.Bool("config.modules.enable_all_filesets", -1)
		forceEnableModuleFilesets, _ := b.BeatConfig.Bool("config.modules.force_enable_module_filesets", -1)
		filesetOverrides := fileset.FilesetOverrides{
			EnableAllFilesets:         enableAllFilesets,
			ForceEnableModuleFilesets: forceEnableModuleFilesets,
		}
		modulesFactory := fileset.NewSetupFactory(b.Info, pipelineLoaderFactory, filesetOverrides)
		if fb.config.ConfigModules.Enabled() {
			if enableAllFilesets {
				// All module configs need to be loaded to enable all the filesets
				// contained in the modules. The default glob just loads the enabled
				// ones. Switching the glob pattern from *.yml to * achieves this.
				origPath, _ := fb.config.ConfigModules.String("path", -1)
				newPath := strings.TrimSuffix(origPath, ".yml")
				_ = fb.config.ConfigModules.SetString("path", -1, newPath)
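				// For example, under an assumed default layout the configured glob
				// ${path.config}/modules.d/*.yml would become
				// ${path.config}/modules.d/*, which also matches disabled module files.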
			}
			modulesLoader := cfgfile.NewReloader(fb.pipeline, fb.config.ConfigModules)
			modulesLoader.Load(modulesFactory)
		}
		return fb.moduleRegistry.LoadPipelines(esClient, overwritePipelines)
	}
	return nil
}

// loadModulesPipelines is called when modules are configured to do the initial
// setup.
func (fb *Filebeat) loadModulesPipelines(b *beat.Beat) error {
	if b.Config.Output.Name() != "elasticsearch" {
		logp.Warn(pipelinesWarning)
		return nil
	}

	overwritePipelines := fb.config.OverwritePipelines
	if b.InSetupCmd {
		overwritePipelines = true
	}

	// register pipeline loading to happen every time a new ES connection is
	// established
	callback := func(esClient *eslegclient.Connection) error {
		return fb.moduleRegistry.LoadPipelines(esClient, overwritePipelines)
	}
	_, err := elasticsearch.RegisterConnectCallback(callback)
	return err
}

// Run allows the beater to be run as a beat.
func (fb *Filebeat) Run(b *beat.Beat) error {
	var err error
	config := fb.config

	if !fb.moduleRegistry.Empty() {
		err = fb.loadModulesPipelines(b)
		if err != nil {
			return err
		}
	}

	waitFinished := newSignalWait()
	waitEvents := newSignalWait()

	// count active events for waiting on shutdown
	wgEvents := &eventCounter{
		count: monitoring.NewInt(nil, "filebeat.events.active"), // Gauge
		added: monitoring.NewUint(nil, "filebeat.events.added"),
		done:  monitoring.NewUint(nil, "filebeat.events.done"),
	}
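	// Note: "added" and "done" are exposed as monotonically increasing counters,
	// while "active" is a gauge that is expected to track roughly added minus
	// done; the shutdown logic further below waits on wgEvents draining.
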
	finishedLogger := newFinishedLogger(wgEvents)

	registryMigrator := registrar.NewMigrator(config.Registry)
	if err := registryMigrator.Run(); err != nil {
		logp.Err("Failed to migrate registry file: %+v", err)
		return err
	}

	stateStore, err := openStateStore(b.Info, logp.NewLogger("filebeat"), config.Registry)
	if err != nil {
		logp.Err("Failed to open state store: %+v", err)
		return err
	}
	defer stateStore.Close()

	err = processLogInputTakeOver(stateStore, config)
	if err != nil {
		logp.Err("Failed to attempt filestream state take over: %+v", err)
		return err
	}

	// Setup registrar to persist state
	registrar, err := registrar.New(stateStore, finishedLogger, config.Registry.FlushTimeout)
	if err != nil {
		logp.Err("Could not init registrar: %v", err)
		return err
	}

	// Make sure all events that were published make it to the registrar so
	// their state is persisted.
	registrarChannel := newRegistrarLogger(registrar)

	// setup event counting for startup and a global common ACKer, such that all events will be
	// routed to the registrar after they've been ACKed.
	// Events with Private==nil or whose Private field is not a file.State are directly
	// forwarded to `finishedLogger`. Events from the `logs` input will first be forwarded
	// to the registrar via `registrarChannel`, which finally forwards the events to finishedLogger as well.
	// The finishedLogger decrements the counters in wgEvents after all events have been securely processed
	// by the registry.
	fb.pipeline = withPipelineEventCounter(b.Publisher, wgEvents)
	fb.pipeline = pipetool.WithACKer(fb.pipeline, eventACKer(finishedLogger, registrarChannel))

	// Filebeat by default requires infinite retries. Let's configure this for all
	// inputs by default. Inputs (and InputController) can overwrite the sending
	// guarantees explicitly when connecting with the pipeline.
	fb.pipeline = pipetool.WithDefaultGuarantees(fb.pipeline, beat.GuaranteedSend)

	outDone := make(chan struct{}) // outDone closes down all active pipeline connections
	pipelineConnector := channel.NewOutletFactory(outDone).Create

	// Create an ES connection factory for dynamic modules pipeline loading
	var pipelineLoaderFactory fileset.PipelineLoaderFactory
	if b.Config.Output.Name() == "elasticsearch" {
		pipelineLoaderFactory = newPipelineLoaderFactory(b.Config.Output.Config())
	} else {
		logp.Warn(pipelinesWarning)
	}

	inputsLogger := logp.NewLogger("input")
	v2Inputs := fb.pluginFactory(b.Info, inputsLogger, stateStore)
	v2InputLoader, err := v2.NewLoader(inputsLogger, v2Inputs, "type", cfg.DefaultType)
	if err != nil {
		panic(err) // loader detected invalid state.
	}

	var inputTaskGroup unison.TaskGroup
	defer func() {
		_ = inputTaskGroup.Stop()
	}()
	if err := v2InputLoader.Init(&inputTaskGroup); err != nil {
		logp.Err("Failed to initialize the input managers: %v", err)
		return err
	}

	inputLoader := channel.RunnerFactoryWithCommonInputSettings(b.Info, compat.Combine(
		compat.RunnerFactory(inputsLogger, b.Info, v2InputLoader),
		input.NewRunnerFactory(pipelineConnector, registrar, fb.done),
	))
	moduleLoader := fileset.NewFactory(inputLoader, b.Info, pipelineLoaderFactory, config.OverwritePipelines)

	crawler, err := newCrawler(inputLoader, moduleLoader, config.Inputs, fb.done, *once)
	if err != nil {
		logp.Err("Could not init crawler: %v", err)
		return err
	}

	// The order of starting and stopping is important. Stopping is inverted to the starting order.
	// The current order is: registrar, publisher, spooler, crawler.
	// That means the crawler is stopped first.

	// Start the registrar
	err = registrar.Start()
	if err != nil {
		return fmt.Errorf("Could not start registrar: %w", err)
	}

	// Stopping registrar will write last state
	defer registrar.Stop()

	// Stopping publisher (might potentially drop items)
	defer func() {
		// Close the registrar logger first to make sure no more events arrive at the registrar;
		// registrarChannel must be closed first to potentially unblock (pretty unlikely) the publisher
		registrarChannel.Close()
		close(outDone) // finally close all active connections to publisher pipeline
	}()

	// Wait for all events to be processed or timeout
	defer waitEvents.Wait()

	if config.OverwritePipelines {
		logp.Debug("modules", "Existing Ingest pipelines will be updated")
	}

	err = crawler.Start(fb.pipeline, config.ConfigInput, config.ConfigModules)
	if err != nil {
		crawler.Stop()
		return fmt.Errorf("Failed to start crawler: %w", err)
	}

	// If run once, add crawler completion check as alternative to done signal
	if *once {
		runOnce := func() {
			logp.Info("Running filebeat once. Waiting for completion ...")
			crawler.WaitForCompletion()
			logp.Info("All data collection completed. Shutting down.")
		}
		waitFinished.Add(runOnce)
	}
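	// With the `once` flag defined at the top of this file, this corresponds to
	// an invocation along the lines of `filebeat -once` (illustrative), which
	// shuts the Beat down once every harvester has reached EOF.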

	// Register reloadable list of inputs and modules
	inputs := cfgfile.NewRunnerList(management.DebugK, inputLoader, fb.pipeline)
	b.Registry.MustRegisterInput(inputs)

	modules := cfgfile.NewRunnerList(management.DebugK, moduleLoader, fb.pipeline)

	var adiscover *autodiscover.Autodiscover
	if fb.config.Autodiscover != nil {
		adiscover, err = autodiscover.NewAutodiscover(
			"filebeat",
			fb.pipeline,
			cfgfile.MultiplexedRunnerFactory(
				cfgfile.MatchHasField("module", moduleLoader),
				cfgfile.MatchDefault(inputLoader),
			),
			autodiscover.QueryConfig(),
			config.Autodiscover,
			b.Keystore,
		)
		if err != nil {
			return err
		}
	}
	adiscover.Start()

	// We start the manager when all the subsystems are initialized and ready to receive events.
	if err := b.Manager.Start(); err != nil {
		return err
	}

	// Add done channel to wait for shutdown signal
	waitFinished.AddChan(fb.done)
	waitFinished.Wait()

	// Stop reloadable lists, autodiscover -> Stop crawler -> stop inputs -> stop harvesters
	// Note: waiting for crawlers to stop here in order to install wgEvents.Wait
	// after all events have been enqueued for publishing. Otherwise wgEvents.Wait
	// or publisher might panic due to concurrent updates.
	inputs.Stop()
	modules.Stop()
	adiscover.Stop()
	crawler.Stop()

	timeout := fb.config.ShutdownTimeout
	// Check if, on shutdown, we should wait for all events to be published.
	waitPublished := fb.config.ShutdownTimeout > 0 || *once
	if waitPublished {
		// Wait for registrar to finish writing registry
		waitEvents.Add(withLog(wgEvents.Wait,
			"Continue shutdown: All enqueued events being published."))
		// Wait for either timeout or all events having been ACKed by outputs.
		if fb.config.ShutdownTimeout > 0 {
			logp.Info("Shutdown output timer started. Waiting for max %v.", timeout)
			waitEvents.Add(withLog(waitDuration(timeout),
				"Continue shutdown: Time out waiting for events being published."))
		} else {
			waitEvents.AddChan(fb.done)
		}
	}
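	// The timeout corresponds to Filebeat's `shutdown_timeout` setting (an
	// assumption based on the ShutdownTimeout field); an illustrative
	// configuration would be:
	//
	//	filebeat.shutdown_timeout: 5s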

	// Stop the manager and stop the connection to any dependent services.
	// The Manager started to have a working implementation when
	// https://github.com/elastic/beats/pull/34416 was merged.
	// This is intended to enable TLS certificate reloading on a long-running Beat.
	//
	// However, calling b.Manager.Stop() here messes up the behavior of the
	// --once flag because it makes Filebeat exit early.
	// So if --once is passed, we don't call b.Manager.Stop().
	if !*once {
		b.Manager.Stop()
	}

	return nil
}

// Stop is called on exit to stop the crawling, spooling and registration processes.
func (fb *Filebeat) Stop() {
	logp.Info("Stopping filebeat")

	// Stop Filebeat
	fb.stopOnce.Do(func() { close(fb.done) })
}

// newPipelineLoaderFactory creates a new pipeline loader (ES client) factory.
func newPipelineLoaderFactory(esConfig *conf.C) fileset.PipelineLoaderFactory {
	pipelineLoaderFactory := func() (fileset.PipelineLoader, error) {
		esClient, err := eslegclient.NewConnectedClient(esConfig, "Filebeat")
		if err != nil {
			return nil, fmt.Errorf("Error creating Elasticsearch client: %w", err)
		}
		return esClient, nil
	}
	return pipelineLoaderFactory
}

// processLogInputTakeOver lets filestream inputs take over the log input state
// if their `take_over` flag is set to `true`.
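// An illustrative input configuration that would exercise this path (assumed
// shape, not taken from this file):
//
//	filebeat.inputs:
//	  - type: filestream
//	    id: migrated-syslog
//	    take_over: true
//	    paths:
//	      - /var/log/syslog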
func processLogInputTakeOver(stateStore StateStore, config *cfg.Config) error {
	inputs, err := fetchInputConfiguration(config)
	if err != nil {
		return fmt.Errorf("Failed to fetch input configuration when attempting take over: %w", err)
	}
	if len(inputs) == 0 {
		return nil
	}

	store, err := stateStore.Access()
	if err != nil {
		return fmt.Errorf("Failed to access state when attempting take over: %w", err)
	}
	defer store.Close()

	logger := logp.NewLogger("filestream-takeover")
	registryHome := paths.Resolve(paths.Data, config.Registry.Path)
	registryHome = filepath.Join(registryHome, "filebeat")
	backuper := backup.NewRegistryBackuper(logger, registryHome)

	return takeover.TakeOverLogInputStates(logger, store, backuper, inputs)
}

// fetchInputConfiguration fetches all the input configurations defined at
// Filebeat startup, including external configuration files.
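// External configurations are the ones referenced through the
// `filebeat.config.inputs` path setting (for example an `inputs.d/*.yml` glob;
// the exact layout is an assumption, not taken from this file). Globs that
// match nothing are simply skipped.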
func fetchInputConfiguration(config *cfg.Config) (inputs []*conf.C, err error) {
	if len(config.Inputs) == 0 {
		inputs = []*conf.C{}
	} else {
		inputs = config.Inputs
	}

	// reading external input configuration if defined
	var dynamicInputCfg cfgfile.DynamicConfig
	if config.ConfigInput != nil {
		err = config.ConfigInput.Unpack(&dynamicInputCfg)
		if err != nil {
			return nil, fmt.Errorf("failed to unpack the dynamic input configuration: %w", err)
		}
	}
	if dynamicInputCfg.Path == "" {
		return inputs, nil
	}

	cfgPaths, err := filepath.Glob(dynamicInputCfg.Path)
	if err != nil {
		return nil, fmt.Errorf("failed to resolve external input configuration paths: %w", err)
	}
	if len(cfgPaths) == 0 {
		return inputs, nil
	}

	// making a copy so we can safely extend the slice
	inputs = make([]*conf.C, len(config.Inputs))
	copy(inputs, config.Inputs)

	for _, p := range cfgPaths {
		externalInputs, err := cfgfile.LoadList(p)
		if err != nil {
			return nil, fmt.Errorf("failed to load external input configuration: %w", err)
		}
		inputs = append(inputs, externalInputs...)
	}

	return inputs, nil
}