Skip to content

Commit

Permalink
Add dynafile cache statistic support
Browse files Browse the repository at this point in the history
(not the same as dynamic stats)
  • Loading branch information
hynd committed Jul 3, 2019
1 parent 77a8a3e commit e444f32
Show file tree
Hide file tree
Showing 6 changed files with 283 additions and 1 deletion.
23 changes: 22 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ ruleset(name="process_stats") {
The exporter itself logs back via syslog, this cannot be configured at the moment.

## Provided Metrics
The following metrics provided by the rsyslog impstats module are tracked by rsyslog_exporter:
The following metrics provided by the rsyslog [impstats](https://www.rsyslog.com/doc/master/configuration/modules/impstats.html) module are tracked by rsyslog_exporter:

### Actions
Action objects describe what is to be done with a message, and are implemented via output modules.
Expand Down Expand Up @@ -68,4 +68,25 @@ Rsyslog tracks how it uses system resources and provides the following metrics:
* nvcsw - number of voluntary context switches
* nivcsw - number of involuntary context switches

### Dynafile Cache
The [omfile](https://www.rsyslog.com/rsyslog-statistic-counter-plugin-omfile/) module can generate
file names from a template. A cache of recent filehandles can be maintained, whose sizing can
impact performance considerably. The module provides the following metrics:

* requests - number of requests made to obtain a dynafile
* level0 - number of requests for the current active file
* missed - number of cache misses
* evicted - number of times a file needed to be evicted from cache
* maxused - maximum number of cache entries ever used
* closetimeouts - number of times a file was closed due to timeout settings

### Dynamic Stats
Rsyslog allows the user to define their own stats namespaces and increment counters within these
buckets using Rainerscript function calls.

These are exported as counters with the metric name identifying the bucket, and a label value
matching the name of the counter (the label name will always be "counter"). As well as custom
metrics, a "global" dynstats namespace is also published with some additional bookkeeping counters.

See the [dyn_stats](https://www.rsyslog.com/doc/master/configuration/dyn_stats.html)
documentation for more information.
83 changes: 83 additions & 0 deletions dynafile_cache.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
package main

import (
"encoding/json"
"fmt"
"strings"
)

// dfcStat represents a single "dynafile cache" statistics record emitted by
// rsyslog's impstats module for the omfile output. The JSON tags mirror the
// counter names published by the plugin.
type dfcStat struct {
	Name          string `json:"name"`          // cache name ("dynafile cache " prefix stripped on decode)
	Origin        string `json:"origin"`        // originating module, e.g. "omfile"
	Requests      int64  `json:"requests"`      // requests made to obtain a dynafile
	Level0        int64  `json:"level0"`        // requests for the current active file
	Missed        int64  `json:"missed"`        // cache misses
	Evicted       int64  `json:"evicted"`       // files evicted from the cache
	MaxUsed       int64  `json:"maxused"`       // maximum cache entries ever used
	CloseTimeouts int64  `json:"closetimeouts"` // files closed due to timeout settings
}

// newDynafileCacheFromJSON parses one impstats dynafile-cache JSON line into
// a dfcStat. The "dynafile cache " prefix that rsyslog prepends to the name
// is trimmed so the remainder can be used directly as a metric label value.
func newDynafileCacheFromJSON(b []byte) (*dfcStat, error) {
	stat := &dfcStat{}
	if err := json.Unmarshal(b, stat); err != nil {
		return nil, fmt.Errorf("error decoding dynafile cache stat `%v`: %v", string(b), err)
	}
	stat.Name = strings.TrimPrefix(stat.Name, "dynafile cache ")
	return stat, nil
}

// toPoints expands a dfcStat into one counter point per tracked cache
// statistic. Every point carries a "cache" label holding the cache's name so
// that multiple dynafile caches can be told apart.
func (d *dfcStat) toPoints() []*point {
	// Table of metric-name suffix, counter value, and help text; iterated in
	// order so the resulting slice matches the documented metric set.
	specs := []struct {
		suffix string
		value  int64
		desc   string
	}{
		{"requests", d.Requests, "number of requests made to obtain a dynafile"},
		{"level0", d.Level0, "number of requests for the current active file"},
		{"missed", d.Missed, "number of cache misses"},
		{"evicted", d.Evicted, "number of times a file needed to be evicted from cache"},
		{"maxused", d.MaxUsed, "maximum number of cache entries ever used"},
		{"closetimeouts", d.CloseTimeouts, "number of times a file was closed due to timeout settings"},
	}

	points := make([]*point, 0, len(specs))
	for _, s := range specs {
		points = append(points, &point{
			Name:        "dynafile_cache_" + s.suffix,
			Type:        counter,
			Value:       s.value,
			Description: s.desc,
			LabelName:   "cache",
			LabelValue:  d.Name,
		})
	}
	return points
}
139 changes: 139 additions & 0 deletions dynafile_cache_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,139 @@
package main

import (
"reflect"
"testing"
)

var (
	// dynafileCacheLog is a raw impstats "dynafile cache" JSON record as
	// emitted by rsyslog's omfile module; shared fixture for the tests below.
	dynafileCacheLog = []byte(`{ "name": "dynafile cache cluster", "origin": "omfile", "requests": 1783254, "level0": 1470906, "missed": 2625, "evicted": 2525, "maxused": 100, "closetimeouts": 10 }`)
)

// TestNewDynafileCacheFromJSON verifies that a raw impstats dynafile cache
// line is detected as the right pstat type and that every field is decoded
// from the JSON payload.
func TestNewDynafileCacheFromJSON(t *testing.T) {
	logType := getStatType(dynafileCacheLog)
	if logType != rsyslogDynafileCache {
		t.Errorf("detected pstat type should be %d but is %d", rsyslogDynafileCache, logType)
	}

	// dynafileCacheLog is already a []byte; no conversion needed.
	pstat, err := newDynafileCacheFromJSON(dynafileCacheLog)
	if err != nil {
		t.Fatalf("expected parsing dynafile cache stat not to fail, got: %v", err)
	}

	if want, got := "cluster", pstat.Name; want != got {
		t.Errorf("want '%s', got '%s'", want, got)
	}

	// Check the numeric counters in one table to avoid repeated boilerplate.
	counters := []struct {
		field string
		want  int64
		got   int64
	}{
		{"requests", 1783254, pstat.Requests},
		{"level0", 1470906, pstat.Level0},
		{"missed", 2625, pstat.Missed},
		{"evicted", 2525, pstat.Evicted},
		{"maxused", 100, pstat.MaxUsed},
		{"closetimeouts", 10, pstat.CloseTimeouts},
	}
	for _, c := range counters {
		if c.want != c.got {
			t.Errorf("%s: want '%d', got '%d'", c.field, c.want, c.got)
		}
	}
}

// TestDynafileCacheToPoints checks that every counter in a parsed dynafile
// cache stat is exported exactly once as a point with the expected name,
// value, and "cache" label.
func TestDynafileCacheToPoints(t *testing.T) {
	// gofmt -s style: the value type is implied by the map's element type.
	wants := map[string]point{
		"dynafile_cache_requests": {
			Name:        "dynafile_cache_requests",
			Type:        counter,
			Value:       1783254,
			Description: "number of requests made to obtain a dynafile",
			LabelName:   "cache",
			LabelValue:  "cluster",
		},
		"dynafile_cache_level0": {
			Name:        "dynafile_cache_level0",
			Type:        counter,
			Value:       1470906,
			Description: "number of requests for the current active file",
			LabelName:   "cache",
			LabelValue:  "cluster",
		},
		"dynafile_cache_missed": {
			Name:        "dynafile_cache_missed",
			Type:        counter,
			Value:       2625,
			Description: "number of cache misses",
			LabelName:   "cache",
			LabelValue:  "cluster",
		},
		"dynafile_cache_evicted": {
			Name:        "dynafile_cache_evicted",
			Type:        counter,
			Value:       2525,
			Description: "number of times a file needed to be evicted from cache",
			LabelName:   "cache",
			LabelValue:  "cluster",
		},
		"dynafile_cache_maxused": {
			Name:        "dynafile_cache_maxused",
			Type:        counter,
			Value:       100,
			Description: "maximum number of cache entries ever used",
			LabelName:   "cache",
			LabelValue:  "cluster",
		},
		"dynafile_cache_closetimeouts": {
			Name:        "dynafile_cache_closetimeouts",
			Type:        counter,
			Value:       10,
			Description: "number of times a file was closed due to timeout settings",
			LabelName:   "cache",
			LabelValue:  "cluster",
		},
	}

	// Track which expected points have actually been produced.
	seen := map[string]bool{}
	for name := range wants {
		seen[name] = false
	}

	pstat, err := newDynafileCacheFromJSON(dynafileCacheLog)
	if err != nil {
		t.Fatalf("expected parsing dynafile cache stat not to fail, got: %v", err)
	}

	points := pstat.toPoints()
	for _, got := range points {
		want, ok := wants[got.Name]
		if !ok {
			t.Errorf("unexpected point, got: %+v", got)
			continue
		}

		if !reflect.DeepEqual(want, *got) {
			t.Errorf("expected point to be %+v, got %+v", want, got)
		}

		if seen[got.Name] {
			t.Errorf("point seen multiple times: %+v", got)
		}
		seen[got.Name] = true
	}

	for name, ok := range seen {
		if !ok {
			t.Errorf("expected to see point with key %s, but did not", name)
		}
	}
}
9 changes: 9 additions & 0 deletions exporter.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ const (
rsyslogQueue
rsyslogResource
rsyslogDynStat
rsyslogDynafileCache
)

type rsyslogExporter struct {
Expand Down Expand Up @@ -93,6 +94,14 @@ func (re *rsyslogExporter) handleStatLine(rawbuf []byte) error {
for _, p := range s.toPoints() {
re.set(p)
}
case rsyslogDynafileCache:
d, err := newDynafileCacheFromJSON(buf)
if err != nil {
return err
}
for _, p := range d.toPoints() {
re.set(p)
}

default:
return fmt.Errorf("unknown pstat type: %v", pstatType)
Expand Down
28 changes: 28 additions & 0 deletions exporter_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -230,6 +230,34 @@ func TestHandleLineWithGlobal(t *testing.T) {
testHelper(t, log, tests)
}

// TestHandleLineWithDynafileCache feeds a full syslog-framed pstats line
// through the exporter and checks that the dynafile cache counters are
// registered under the expected "cluster" label. Covers all six counters in
// the fixture, including maxused and closetimeouts.
func TestHandleLineWithDynafileCache(t *testing.T) {
	// gofmt -s style: &testUnit is implied by the []*testUnit element type.
	tests := []*testUnit{
		{
			Name:       "dynafile_cache_requests",
			Val:        412044,
			LabelValue: "cluster",
		},
		{
			Name:       "dynafile_cache_level0",
			Val:        294002,
			LabelValue: "cluster",
		},
		{
			Name:       "dynafile_cache_missed",
			Val:        210,
			LabelValue: "cluster",
		},
		{
			Name:       "dynafile_cache_evicted",
			Val:        14,
			LabelValue: "cluster",
		},
		{
			Name:       "dynafile_cache_maxused",
			Val:        100,
			LabelValue: "cluster",
		},
		{
			Name:       "dynafile_cache_closetimeouts",
			Val:        0,
			LabelValue: "cluster",
		},
	}

	dynafileCacheLog := []byte(`2019-07-03T17:04:01.312432+00:00 some-node.example.org rsyslogd-pstats: { "name": "dynafile cache cluster", "origin": "omfile", "requests": 412044, "level0": 294002, "missed": 210, "evicted": 14, "maxused": 100, "closetimeouts": 0 }`)
	testHelper(t, dynafileCacheLog, tests)
}

func TestHandleUnknown(t *testing.T) {
unknownLog := []byte(`2017-08-30T08:10:04.786350+00:00 some-node.example.org rsyslogd-pstats: {"a":"b"}`)

Expand Down
2 changes: 2 additions & 0 deletions utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@ func getStatType(buf []byte) rsyslogType {
return rsyslogResource
} else if strings.Contains(line, "dynstats") {
return rsyslogDynStat
} else if strings.Contains(line, "dynafile cache") {
return rsyslogDynafileCache
}
return rsyslogUnknown
}

0 comments on commit e444f32

Please sign in to comment.