From 908f72601f8c41e318e84a75951284f7ae9f6f9f Mon Sep 17 00:00:00 2001 From: Yaroslav Kirillov Date: Wed, 18 Dec 2024 14:12:00 +0500 Subject: [PATCH 1/2] K8s-multiline cut off long events by limit (#720) * k8s-multiline cut off long events by limit * Replace cut_off_message to cut_off_field --- fd/util.go | 42 +++++------ pipeline/pipeline.go | 89 ++++++++++++----------- pipeline/pipeline_whitebox_test.go | 32 +++----- plugin/input/k8s/multiline_action.go | 46 ++++++++++-- plugin/input/k8s/multiline_action_test.go | 24 ++++++ 5 files changed, 140 insertions(+), 93 deletions(-) diff --git a/fd/util.go b/fd/util.go index 0bc3fc34..395a9d96 100644 --- a/fd/util.go +++ b/fd/util.go @@ -23,7 +23,7 @@ func extractPipelineParams(settings *simplejson.Json) *pipeline.Settings { avgInputEventSize := pipeline.DefaultAvgInputEventSize maxInputEventSize := pipeline.DefaultMaxInputEventSize cutOffEventByLimit := pipeline.DefaultCutOffEventByLimit - cutOffEventByLimitMsg := pipeline.DefaultCutOffEventByLimitMsg + cutOffEventByLimitField := pipeline.DefaultCutOffEventByLimitField streamField := pipeline.DefaultStreamField maintenanceInterval := pipeline.DefaultMaintenanceInterval decoder := pipeline.DefaultDecoder @@ -56,11 +56,7 @@ func extractPipelineParams(settings *simplejson.Json) *pipeline.Settings { } cutOffEventByLimit = settings.Get("cut_off_event_by_limit").MustBool() - - cutOffEventByLimitMsg = settings.Get("cut_off_event_by_limit_message").MustString() - if maxInputEventSize > 0 && len(cutOffEventByLimitMsg) >= maxInputEventSize { - logger.Fatal("length of cut_off_event_by_limit_message must be less than max_event_size") - } + cutOffEventByLimitField = settings.Get("cut_off_event_by_limit_field").MustString() str := settings.Get("decoder").MustString() if str != "" { @@ -124,23 +120,23 @@ func extractPipelineParams(settings *simplejson.Json) *pipeline.Settings { } return &pipeline.Settings{ - Decoder: decoder, - DecoderParams: decoderParams, - Capacity: capacity, - MetaCacheSize: metaCacheSize, - AvgEventSize: avgInputEventSize, - MaxEventSize: maxInputEventSize, - CutOffEventByLimit: cutOffEventByLimit, - CutOffEventByLimitMsg: cutOffEventByLimitMsg, - AntispamThreshold: antispamThreshold, - AntispamExceptions: antispamExceptions, - SourceNameMetaField: sourceNameMetaField, - MaintenanceInterval: maintenanceInterval, - EventTimeout: eventTimeout, - StreamField: streamField, - IsStrict: isStrict, - MetricHoldDuration: metricHoldDuration, - Pool: pipeline.PoolType(pool), + Decoder: decoder, + DecoderParams: decoderParams, + Capacity: capacity, + MetaCacheSize: metaCacheSize, + AvgEventSize: avgInputEventSize, + MaxEventSize: maxInputEventSize, + CutOffEventByLimit: cutOffEventByLimit, + CutOffEventByLimitField: cutOffEventByLimitField, + AntispamThreshold: antispamThreshold, + AntispamExceptions: antispamExceptions, + SourceNameMetaField: sourceNameMetaField, + MaintenanceInterval: maintenanceInterval, + EventTimeout: eventTimeout, + StreamField: streamField, + IsStrict: isStrict, + MetricHoldDuration: metricHoldDuration, + Pool: pipeline.PoolType(pool), } } diff --git a/pipeline/pipeline.go b/pipeline/pipeline.go index 1f2c65d5..d7c0e08c 100644 --- a/pipeline/pipeline.go +++ b/pipeline/pipeline.go @@ -24,23 +24,23 @@ import ( ) const ( - DefaultAntispamThreshold = 0 - DefaultSourceNameMetaField = "" - DefaultDecoder = "auto" - DefaultIsStrict = false - DefaultStreamField = "stream" - DefaultCapacity = 1024 - DefaultAvgInputEventSize = 4 * 1024 - DefaultMaxInputEventSize = 0 - 
DefaultCutOffEventByLimit = false - DefaultCutOffEventByLimitMsg = "" - DefaultJSONNodePoolSize = 1024 - DefaultMaintenanceInterval = time.Second * 5 - DefaultEventTimeout = time.Second * 30 - DefaultFieldValue = "not_set" - DefaultStreamName = StreamName("not_set") - DefaultMetricHoldDuration = time.Minute * 30 - DefaultMetaCacheSize = 1024 + DefaultAntispamThreshold = 0 + DefaultSourceNameMetaField = "" + DefaultDecoder = "auto" + DefaultIsStrict = false + DefaultStreamField = "stream" + DefaultCapacity = 1024 + DefaultAvgInputEventSize = 4 * 1024 + DefaultMaxInputEventSize = 0 + DefaultCutOffEventByLimit = false + DefaultCutOffEventByLimitField = "" + DefaultJSONNodePoolSize = 1024 + DefaultMaintenanceInterval = time.Second * 5 + DefaultEventTimeout = time.Second * 30 + DefaultFieldValue = "not_set" + DefaultStreamName = StreamName("not_set") + DefaultMetricHoldDuration = time.Minute * 30 + DefaultMetaCacheSize = 1024 EventSeqIDError = uint64(0) @@ -146,23 +146,23 @@ type Pipeline struct { } type Settings struct { - Decoder string - DecoderParams map[string]any - Capacity int - MetaCacheSize int - MaintenanceInterval time.Duration - EventTimeout time.Duration - AntispamThreshold int - AntispamExceptions antispam.Exceptions - SourceNameMetaField string - AvgEventSize int - MaxEventSize int - CutOffEventByLimit bool - CutOffEventByLimitMsg string - StreamField string - IsStrict bool - MetricHoldDuration time.Duration - Pool PoolType + Decoder string + DecoderParams map[string]any + Capacity int + MetaCacheSize int + MaintenanceInterval time.Duration + EventTimeout time.Duration + AntispamThreshold int + AntispamExceptions antispam.Exceptions + SourceNameMetaField string + AvgEventSize int + MaxEventSize int + CutOffEventByLimit bool + CutOffEventByLimitField string + StreamField string + IsStrict bool + MetricHoldDuration time.Duration + Pool PoolType } type PoolType string @@ -419,9 +419,12 @@ type Offsets interface { // In decodes message and passes it to event stream. func (p *Pipeline) In(sourceID SourceID, sourceName string, offset Offsets, bytes []byte, isNewSource bool, meta metadata.MetaData) (seqID uint64) { + var ( + ok bool + cutoff bool + ) // don't process mud. 
- var ok bool - bytes, ok = p.checkInputBytes(bytes, sourceName, meta) + bytes, cutoff, ok = p.checkInputBytes(bytes, sourceName, meta) if !ok { return EventSeqIDError } @@ -563,6 +566,9 @@ func (p *Pipeline) In(sourceID SourceID, sourceName string, offset Offsets, byte } } } + if cutoff && p.settings.CutOffEventByLimitField != "" { + event.Root.AddFieldNoAlloc(event.Root, p.settings.CutOffEventByLimitField).MutateToBool(true) + } event.Offset = offset.Current() event.SourceID = sourceID @@ -572,11 +578,11 @@ func (p *Pipeline) In(sourceID SourceID, sourceName string, offset Offsets, byte return p.streamEvent(event) } -func (p *Pipeline) checkInputBytes(bytes []byte, sourceName string, meta metadata.MetaData) ([]byte, bool) { +func (p *Pipeline) checkInputBytes(bytes []byte, sourceName string, meta metadata.MetaData) ([]byte, bool, bool) { length := len(bytes) if length == 0 || (bytes[0] == '\n' && length == 1) { - return bytes, false + return bytes, false, false } if p.settings.MaxEventSize != 0 && length > p.settings.MaxEventSize { @@ -587,17 +593,18 @@ func (p *Pipeline) checkInputBytes(bytes []byte, sourceName string, meta metadat p.IncMaxEventSizeExceeded(source) if !p.settings.CutOffEventByLimit { - return bytes, false + return bytes, false, false } wasNewLine := bytes[len(bytes)-1] == '\n' - bytes = append(bytes[:p.settings.MaxEventSize], p.settings.CutOffEventByLimitMsg...) + bytes = bytes[:p.settings.MaxEventSize] if wasNewLine { bytes = append(bytes, '\n') } + return bytes, true, true } - return bytes, true + return bytes, false, true } func (p *Pipeline) streamEvent(event *Event) uint64 { diff --git a/pipeline/pipeline_whitebox_test.go b/pipeline/pipeline_whitebox_test.go index 29f33726..9dd9b400 100644 --- a/pipeline/pipeline_whitebox_test.go +++ b/pipeline/pipeline_whitebox_test.go @@ -56,6 +56,7 @@ func TestCheckInputBytes(t *testing.T) { pipelineSettings *Settings input []byte want []byte + wantCutoff bool wantOk bool }{ { @@ -111,9 +112,10 @@ func TestCheckInputBytes(t *testing.T) { MaxEventSize: 10, CutOffEventByLimit: true, }, - input: []byte("some loooooooog"), - want: []byte("some loooo"), - wantOk: true, + input: []byte("some loooooooog"), + want: []byte("some loooo"), + wantCutoff: true, + wantOk: true, }, { name: "cutoff_newline", @@ -124,23 +126,10 @@ func TestCheckInputBytes(t *testing.T) { MaxEventSize: 10, CutOffEventByLimit: true, }, - input: []byte("some loooooooog\n"), - want: []byte("some loooo\n"), - wantOk: true, - }, - { - name: "cutoff_with_msg", - pipelineSettings: &Settings{ - Capacity: 5, - Decoder: "raw", - MetricHoldDuration: DefaultMetricHoldDuration, - MaxEventSize: 10, - CutOffEventByLimit: true, - CutOffEventByLimitMsg: "", - }, - input: []byte("some loooooooog\n"), - want: []byte("some loooo\n"), - wantOk: true, + input: []byte("some loooooooog\n"), + want: []byte("some loooo\n"), + wantCutoff: true, + wantOk: true, }, } @@ -148,8 +137,9 @@ func TestCheckInputBytes(t *testing.T) { t.Run(tCase.name, func(t *testing.T) { pipe := New("test_pipeline", tCase.pipelineSettings, prometheus.NewRegistry()) - data, ok := pipe.checkInputBytes(tCase.input, "test", nil) + data, cutoff, ok := pipe.checkInputBytes(tCase.input, "test", nil) + assert.Equal(t, tCase.wantCutoff, cutoff) assert.Equal(t, tCase.wantOk, ok) if !tCase.wantOk { return diff --git a/plugin/input/k8s/multiline_action.go b/plugin/input/k8s/multiline_action.go index 0e6961d3..838d61e7 100644 --- a/plugin/input/k8s/multiline_action.go +++ b/plugin/input/k8s/multiline_action.go @@ -12,14 +12,18 
@@ type MultilineAction struct { allowedPodLabels map[string]bool allowedNodeLabels map[string]bool - logger *zap.SugaredLogger - controller pipeline.ActionPluginController - maxEventSize int - sourceNameMetaField string + logger *zap.SugaredLogger + controller pipeline.ActionPluginController + + maxEventSize int + sourceNameMetaField string + cutOffEventByLimit bool + cutOffEventByLimitField string eventBuf []byte eventSize int skipNextEvent bool + cutOffEvent bool } const ( @@ -32,6 +36,9 @@ func (p *MultilineAction) Start(config pipeline.AnyConfig, params *pipeline.Acti p.controller = params.Controller p.maxEventSize = params.PipelineSettings.MaxEventSize p.sourceNameMetaField = params.PipelineSettings.SourceNameMetaField + p.cutOffEventByLimit = params.PipelineSettings.CutOffEventByLimit + p.cutOffEventByLimitField = params.PipelineSettings.CutOffEventByLimitField + p.config = config.(*Config) p.allowedPodLabels = cfg.ListToMap(p.config.AllowedPodLabels) @@ -110,7 +117,16 @@ func (p *MultilineAction) Do(event *pipeline.Event) pipeline.ActionResult { // skip event if max_event_size is exceeded p.skipNextEvent = true - p.logger.Errorf("event chunk will be discarded due to max_event_size, source_name=%s, namespace=%s, pod=%s", event.SourceName, ns, pod) + + if p.cutOffEventByLimit { + offset := sizeAfterAppend - p.maxEventSize + p.eventBuf = append(p.eventBuf, logFragment[1:logFragmentLen-1-offset]...) + p.cutOffEvent = true + + p.logger.Errorf("event chunk will be cut off due to max_event_size, source_name=%s, namespace=%s, pod=%s", event.SourceName, ns, pod) + } else { + p.logger.Errorf("event chunk will be discarded due to max_event_size, source_name=%s, namespace=%s, pod=%s", event.SourceName, ns, pod) + } } return pipeline.ActionCollapse } @@ -121,8 +137,11 @@ func (p *MultilineAction) Do(event *pipeline.Event) pipeline.ActionResult { return pipeline.ActionCollapse } p.skipNextEvent = false - p.resetLogBuf() - return pipeline.ActionDiscard + + if !p.cutOffEvent { + p.resetLogBuf() + return pipeline.ActionDiscard + } } success, podMeta := meta.GetPodMeta(ns, pod, containerID) @@ -164,7 +183,17 @@ func (p *MultilineAction) Do(event *pipeline.Event) pipeline.ActionResult { } if len(p.eventBuf) > 1 { - p.eventBuf = append(p.eventBuf, logFragment[1:logFragmentLen-1]...) + if !p.cutOffEvent { + p.eventBuf = append(p.eventBuf, logFragment[1:logFragmentLen-1]...) + } else { + if isEnd { + p.eventBuf = append(p.eventBuf, newLine...) 
+ } + + if p.cutOffEventByLimitField != "" { + event.Root.AddFieldNoAlloc(event.Root, p.cutOffEventByLimitField).MutateToBool(true) + } + } p.eventBuf = append(p.eventBuf, '"') l := len(event.Buf) @@ -179,4 +208,5 @@ func (p *MultilineAction) Do(event *pipeline.Event) pipeline.ActionResult { func (p *MultilineAction) resetLogBuf() { p.eventBuf = p.eventBuf[:1] p.eventSize = 0 + p.cutOffEvent = false } diff --git a/plugin/input/k8s/multiline_action_test.go b/plugin/input/k8s/multiline_action_test.go index 5ff9a057..06e471f1 100644 --- a/plugin/input/k8s/multiline_action_test.go +++ b/plugin/input/k8s/multiline_action_test.go @@ -37,6 +37,9 @@ func TestMultilineAction_Do(t *testing.T) { Name string EventParts []string + CutOffEventByLimit bool + CutOffEventByLimitField string + ActionResults []pipeline.ActionResult ExpectedRoot string }{ @@ -65,6 +68,24 @@ func TestMultilineAction_Do(t *testing.T) { ActionResults: []pipeline.ActionResult{pipeline.ActionCollapse, pipeline.ActionCollapse, pipeline.ActionDiscard}, ExpectedRoot: wrapK8sInfo(`event\n`, item, meta.SelfNodeName), }, + { + Name: "must cutoff long event", + EventParts: []string{`{"log": "some "}`, `{"log": "other long "}`, `{"log":"long long"}`, `{"log": "event\n"}`}, + CutOffEventByLimit: true, + ActionResults: []pipeline.ActionResult{pipeline.ActionCollapse, pipeline.ActionCollapse, pipeline.ActionCollapse, pipeline.ActionPass}, + ExpectedRoot: wrapK8sInfo(`some other long l\n`, item, meta.SelfNodeName), + }, + { + Name: "must cutoff long event with field", + EventParts: []string{`{"log": "some "}`, `{"log": "other long "}`, `{"log":"long long"}`, `{"log": "event\n"}`}, + CutOffEventByLimit: true, + CutOffEventByLimitField: "cutoff", + ActionResults: []pipeline.ActionResult{pipeline.ActionCollapse, pipeline.ActionCollapse, pipeline.ActionCollapse, pipeline.ActionPass}, + ExpectedRoot: fmt.Sprintf( + `{"log":"%s","k8s_pod":"%s","k8s_namespace":"%s","k8s_container":"%s","k8s_container_id":"%s","k8s_node":"%s","k8s_pod_label_allowed_label":"allowed_value","k8s_node_label_zone":"z34","cutoff":true}`, + `some other long l\n`, item.PodName, item.Namespace, item.ContainerName, item.ContainerID, meta.SelfNodeName, + ), + }, } root := insaneJSON.Spawn() defer insaneJSON.Release(root) @@ -72,6 +93,9 @@ func TestMultilineAction_Do(t *testing.T) { for _, tc := range tcs { t.Run(tc.Name, func(t *testing.T) { for i, part := range tc.EventParts { + plugin.cutOffEventByLimit = tc.CutOffEventByLimit + plugin.cutOffEventByLimitField = tc.CutOffEventByLimitField + require.NoError(t, root.DecodeString(part)) event := &pipeline.Event{Root: root, SourceName: filename, Size: len(part)} From 28be3e4d5cffb0a13740b9afe0b1b19cf8812e10 Mon Sep 17 00:00:00 2001 From: Dmitry Romanov Date: Fri, 20 Dec 2024 17:42:25 +0700 Subject: [PATCH 2/2] optimize file worker for once render meta (#725) --- plugin/input/file/README.md | 2 -- plugin/input/file/worker.go | 42 ++++++++++++++------------------ plugin/input/file/worker_test.go | 13 +--------- 3 files changed, 19 insertions(+), 38 deletions(-) diff --git a/plugin/input/file/README.md b/plugin/input/file/README.md index 06339dfb..90207fea 100755 --- a/plugin/input/file/README.md +++ b/plugin/input/file/README.md @@ -166,6 +166,4 @@ Dirs that don't meet this pattern will be ignored. **`symlink`** **`inode`** - -**`offset`**
*Generated using [__insane-doc__](https://github.com/vitkovskii/insane-doc)* \ No newline at end of file diff --git a/plugin/input/file/worker.go b/plugin/input/file/worker.go index 23cefbdb..98db196e 100644 --- a/plugin/input/file/worker.go +++ b/plugin/input/file/worker.go @@ -69,6 +69,23 @@ func (w *worker) work(controller inputer, jobProvider *jobProvider, readBufferSi readTotal := int64(0) scanned := int64(0) + var metadataInfo metadata.MetaData + if w.metaTemplater != nil { + metaData, err := newMetaInformation( + job.filename, + job.symlink, + job.inode, + w.needK8sMeta, + ) + if err != nil { + logger.Error("cannot parse meta info", zap.Error(err)) + } + metadataInfo, err = w.metaTemplater.Render(metaData) + if err != nil { + logger.Error("cannot render meta info", zap.Error(err)) + } + } + // append the data of the old work, this happens when the event was not completely written to the file // for example: {"level": "info", "message": "some... // the end of the message can be added later and will be read in this iteration @@ -123,24 +140,6 @@ func (w *worker) work(controller inputer, jobProvider *jobProvider, readBufferSi inBuf = accumBuf } - var metadataInfo metadata.MetaData - if w.metaTemplater != nil { - metaData, err := newMetaInformation( - job.filename, - job.symlink, - job.inode, - lastOffset+scanned, - w.needK8sMeta, - ) - if err != nil { - logger.Error("cannot parse meta info", zap.Error(err)) - } - metadataInfo, err = w.metaTemplater.Render(metaData) - if err != nil { - logger.Error("cannot render meta info", zap.Error(err)) - } - } - job.lastEventSeq = controller.In(sourceID, sourceName, Offset{lastOffset + scanned, offsets}, inBuf, isVirgin, metadataInfo) } // restore the line buffer @@ -197,12 +196,11 @@ type metaInformation struct { filename string symlink string inode uint64 - offset int64 k8sMetadata *k8s_meta.K8sMetaInformation } -func newMetaInformation(filename, symlink string, inode inodeID, offset int64, parseK8sMeta bool) (metaInformation, error) { +func newMetaInformation(filename, symlink string, inode inodeID, parseK8sMeta bool) (metaInformation, error) { var metaData k8s_meta.K8sMetaInformation var err error if parseK8sMeta { @@ -219,7 +217,6 @@ func newMetaInformation(filename, symlink string, inode inodeID, offset int64, p filename: filename, symlink: symlink, inode: uint64(inode), - offset: offset, k8sMetadata: &metaData, }, nil } @@ -229,7 +226,6 @@ func (m metaInformation) GetData() map[string]any { "filename": m.filename, "symlink": m.symlink, "inode": m.inode, - "offset": m.offset, } if m.k8sMetadata != nil { @@ -255,8 +251,6 @@ func (m metaInformation) GetData() map[string]any { **`symlink`** **`inode`** - -**`offset`** }*/ type Offset struct { diff --git a/plugin/input/file/worker_test.go b/plugin/input/file/worker_test.go index 507f25a5..2c6ce67c 100644 --- a/plugin/input/file/worker_test.go +++ b/plugin/input/file/worker_test.go @@ -315,7 +315,6 @@ func TestNewMetaInformation(t *testing.T) { filename string symlink string inode inodeID - offset int64 parseK8sMeta bool expectError bool expectedK8sMeta *k8s_meta.K8sMetaInformation @@ -325,7 +324,6 @@ func TestNewMetaInformation(t *testing.T) { filename: "/k8s-logs/advanced-logs-checker-2222222222-trtrq_sre_duty-bot-4e0301b633eaa2bfdcafdeba59ba0c72a3815911a6a820bf273534b0f32d98e0.log", symlink: "", inode: 12345, - offset: 0, parseK8sMeta: true, expectError: false, expectedK8sMeta: &k8s_meta.K8sMetaInformation{ @@ -340,7 +338,6 @@ func TestNewMetaInformation(t *testing.T) { filename: 
"/4e0301b633eaa2bfdcafdeba59ba0c72a3815911a6a820bf273534b0f32d98e0.log", symlink: "/k8s-logs/advanced-logs-checker-2222222222-trtrq_sre_duty-bot-4e0301b633eaa2bfdcafdeba59ba0c72a3815911a6a820bf273534b0f32d98e0.log", inode: 12345, - offset: 0, parseK8sMeta: true, expectError: false, expectedK8sMeta: &k8s_meta.K8sMetaInformation{ @@ -355,7 +352,6 @@ func TestNewMetaInformation(t *testing.T) { filename: "/4e0301b633eaa2bfdcafdeba59ba0c72a3815911a6a820bf273534b0f32d98e0.log", symlink: "/k8s-logs/advanced-logs-checker-2222222222-trtrq_sre_duty-bot-4e0301b633eaa2bfdcafdeba59ba0c72a3815911a6a820bf273534b0f32d98e0.log", inode: 12345, - offset: 0, parseK8sMeta: false, expectError: false, expectedK8sMeta: &k8s_meta.K8sMetaInformation{}, @@ -365,7 +361,6 @@ func TestNewMetaInformation(t *testing.T) { filename: "", symlink: "", inode: 0, - offset: 0, parseK8sMeta: false, expectError: false, expectedK8sMeta: &k8s_meta.K8sMetaInformation{}, // No K8s metadata expected @@ -375,7 +370,6 @@ func TestNewMetaInformation(t *testing.T) { filename: "invalidfile.txt", symlink: "invalidsymlink", inode: 0, - offset: 0, parseK8sMeta: true, expectError: true, expectedK8sMeta: &k8s_meta.K8sMetaInformation{}, // No K8s metadata expected @@ -384,7 +378,7 @@ func TestNewMetaInformation(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - metaInfo, err := newMetaInformation(tt.filename, tt.symlink, tt.inode, tt.offset, tt.parseK8sMeta) + metaInfo, err := newMetaInformation(tt.filename, tt.symlink, tt.inode, tt.parseK8sMeta) if (err != nil) != tt.expectError { t.Errorf("expected error: %v, got: %v", tt.expectError, err) @@ -394,7 +388,6 @@ func TestNewMetaInformation(t *testing.T) { assert.Equal(t, tt.filename, metaInfo.filename) assert.Equal(t, tt.symlink, metaInfo.symlink) assert.Equal(t, uint64(tt.inode), metaInfo.inode) - assert.Equal(t, tt.offset, metaInfo.offset) if tt.parseK8sMeta { assert.Equal(t, tt.expectedK8sMeta.PodName, metaInfo.k8sMetadata.PodName) @@ -421,7 +414,6 @@ func TestGetData(t *testing.T) { filename: "/4e0301b633eaa2bfdcafdeba59ba0c72a3815911a6a820bf273534b0f32d98e0.log", symlink: "/k8s-logs/advanced-logs-checker-2222222222-trtrq_sre_duty-bot-4e0301b633eaa2bfdcafdeba59ba0c72a3815911a6a820bf273534b0f32d98e0.log", inode: 12345, - offset: 0, k8sMetadata: &k8s_meta.K8sMetaInformation{ PodName: "advanced-logs-checker-2222222222-trtrq", Namespace: "sre", @@ -433,7 +425,6 @@ func TestGetData(t *testing.T) { "filename": "/4e0301b633eaa2bfdcafdeba59ba0c72a3815911a6a820bf273534b0f32d98e0.log", "symlink": "/k8s-logs/advanced-logs-checker-2222222222-trtrq_sre_duty-bot-4e0301b633eaa2bfdcafdeba59ba0c72a3815911a6a820bf273534b0f32d98e0.log", "inode": uint64(12345), - "offset": int64(0), "pod_name": "advanced-logs-checker-2222222222-trtrq", "namespace": "sre", "container_name": "duty-bot", @@ -446,13 +437,11 @@ func TestGetData(t *testing.T) { filename: "/container.log", symlink: "/k8s-logs/container.log", inode: 12345, - offset: 0, }, expected: map[string]any{ "filename": "/container.log", "symlink": "/k8s-logs/container.log", "inode": uint64(12345), - "offset": int64(0), "pod_name": nil, "namespace": nil, "container_name": nil,