[receiver/mongodbatlasreceiver] add metrics project config (#28866)
**Description:**
This feature adds a project config for metrics so that collection can be filtered by project name and, optionally, by clusters within each project.
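
For illustration, a minimal sketch of a metrics configuration using the new fields might look like this (project and cluster names are placeholders; credentials are omitted):

```yaml
receivers:
  mongodbatlas:
    # public_key / private_key omitted
    collection_interval: 3m
    projects:
      - name: Project1
        include_clusters: [Cluster1, Cluster2]  # scrape only these clusters in Project1
```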

**Link to tracking Issue:** #28865

**Testing:**
- Added a test for cluster filtering
- Tested project name alone, project name with `IncludeClusters`, and project name with `ExcludeClusters` against a live environment, all successfully.

**Documentation:**
Added the optional project config fields to the README.

---------

Co-authored-by: Daniel Jaglowski <jaglows3@gmail.com>
JonathanWamsley and djaglowski authored Nov 17, 2023
1 parent 1015281 commit 8372ccc
Showing 6 changed files with 239 additions and 23 deletions.
27 changes: 27 additions & 0 deletions .chloggen/mongodbatlasreceiver-add-metrics-project-config.yaml
@@ -0,0 +1,27 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: enhancement

# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
component: receiver/mongodbatlasreceiver

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Adds a project config to mongodbatlas metrics to filter by project name and clusters.

# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
issues: [28865]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:

# If your change doesn't affect end users or the exported elements of any package,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: []
5 changes: 5 additions & 0 deletions receiver/mongodbatlasreceiver/README.md
@@ -42,6 +42,11 @@ MongoDB Atlas [Documentation](https://www.mongodb.com/docs/atlas/reference/api/l
- `granularity` (default `PT1M` - See [MongoDB Atlas Documentation](https://docs.atlas.mongodb.com/reference/api/process-measurements/))
- `collection_interval` (default `3m`) This receiver collects metrics on an interval. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`.
- `storage` (optional) The component ID of a storage extension which can be used when polling for `alerts` or `events` . The storage extension prevents duplication of data after a collector restart by remembering which data were previously collected.
- `projects` (optional for metrics) A list of projects this receiver collects metrics from instead of all projects in the organization (see the example configuration after this excerpt).
  - `name` Name of the project to collect metrics from.
  - `include_clusters` (default empty, mutually exclusive with `exclude_clusters`) Only collect metrics from the listed clusters.
  - `exclude_clusters` (default empty, mutually exclusive with `include_clusters`) Collect metrics from all clusters except the listed ones.
  - If both `include_clusters` and `exclude_clusters` are empty, all clusters in the project are included.
- `retry_on_failure`
  - `enabled` (default true)
  - `initial_interval` (default 5s)
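
A hypothetical `projects` block combining the filter styles described above (project and cluster names are invented for this example) could look like:

```yaml
projects:
  - name: Project1
    include_clusters: [Cluster1]   # scrape only Cluster1
  - name: Project2
    exclude_clusters: [Cluster2]   # scrape every cluster except Cluster2
  - name: Project3                 # no filters: scrape all clusters in the project
```
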
7 changes: 7 additions & 0 deletions receiver/mongodbatlasreceiver/config.go
@@ -28,6 +28,7 @@ type Config struct {
	PrivateKey           configopaque.String           `mapstructure:"private_key"`
	Granularity          string                        `mapstructure:"granularity"`
	MetricsBuilderConfig metadata.MetricsBuilderConfig `mapstructure:",squash"`
	Projects             []*ProjectConfig              `mapstructure:"projects"`
	Alerts               AlertConfig                   `mapstructure:"alerts"`
	Events               *EventsConfig                 `mapstructure:"events"`
	Logs                 LogConfig                     `mapstructure:"logs"`
@@ -133,6 +134,12 @@
func (c *Config) Validate() error {
	var errs error

	for _, project := range c.Projects {
		if len(project.ExcludeClusters) != 0 && len(project.IncludeClusters) != 0 {
			errs = multierr.Append(errs, errClusterConfig)
		}
	}

	errs = multierr.Append(errs, c.Alerts.validate())
	errs = multierr.Append(errs, c.Logs.validate())
	if c.Events != nil {
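
For reference, a hypothetical `projects` entry that this check rejects, because it sets both filter lists on the same project, would look like:

```yaml
projects:
  - name: Project1
    include_clusters: [Cluster1]
    exclude_clusters: [Cluster2]  # invalid: the two lists are mutually exclusive
```
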
41 changes: 41 additions & 0 deletions receiver/mongodbatlasreceiver/config_test.go
@@ -116,6 +116,47 @@ func TestValidate(t *testing.T) {
			},
			expectedErr: errNoCert.Error(),
		},
		{
			name: "Valid Metrics Config",
			input: Config{
				Projects: []*ProjectConfig{
					{
						Name: "Project1",
					},
				},
				ScraperControllerSettings: scraperhelper.NewDefaultScraperControllerSettings(metadata.Type),
			},
		},
		{
			name: "Valid Metrics Config with multiple projects with an inclusion or exclusion",
			input: Config{
				Projects: []*ProjectConfig{
					{
						Name:            "Project1",
						IncludeClusters: []string{"Cluster1"},
					},
					{
						Name:            "Project2",
						ExcludeClusters: []string{"Cluster1"},
					},
				},
				ScraperControllerSettings: scraperhelper.NewDefaultScraperControllerSettings(metadata.Type),
			},
		},
		{
			name: "invalid Metrics Config",
			input: Config{
				Projects: []*ProjectConfig{
					{
						Name:            "Project1",
						IncludeClusters: []string{"Cluster1"},
						ExcludeClusters: []string{"Cluster2"},
					},
				},
				ScraperControllerSettings: scraperhelper.NewDefaultScraperControllerSettings(metadata.Type),
			},
			expectedErr: errClusterConfig.Error(),
		},
		{
			name: "Valid Logs Config",
			input: Config{
116 changes: 93 additions & 23 deletions receiver/mongodbatlasreceiver/receiver.go
@@ -37,6 +37,10 @@ type timeconstraints struct {

func newMongoDBAtlasReceiver(settings receiver.CreateSettings, cfg *Config) *mongodbatlasreceiver {
	client := internal.NewMongoDBAtlasClient(cfg.PublicKey, string(cfg.PrivateKey), cfg.RetrySettings, settings.Logger)
	for _, p := range cfg.Projects {
		p.populateIncludesAndExcludes()
	}

	return &mongodbatlasreceiver{
		log: settings.Logger,
		cfg: cfg,
@@ -77,47 +81,113 @@ func (s *mongodbatlasreceiver) shutdown(context.Context) error {
	return s.client.Shutdown()
}

// poll decides whether to poll all projects or a specific project based on the configuration.
func (s *mongodbatlasreceiver) poll(ctx context.Context, time timeconstraints) error {
	if len(s.cfg.Projects) == 0 {
		return s.pollAllProjects(ctx, time)
	}
	return s.pollProjects(ctx, time)
}

// pollAllProjects handles polling across all projects within all organizations.
func (s *mongodbatlasreceiver) pollAllProjects(ctx context.Context, time timeconstraints) error {
	orgs, err := s.client.Organizations(ctx)
	if err != nil {
		return fmt.Errorf("error retrieving organizations: %w", err)
	}
	for _, org := range orgs {
		proj, err := s.client.Projects(ctx, org.ID)
		if err != nil {
			s.log.Error("error retrieving projects", zap.String("orgID", org.ID), zap.Error(err))
			continue
		}
		for _, project := range proj {
			// Since there is no specific ProjectConfig for these projects, pass nil.
			if err := s.processProject(ctx, time, org.Name, project, nil); err != nil {
				s.log.Error("error processing project", zap.String("projectID", project.ID), zap.Error(err))
			}
		}
	}
	return nil
}

// pollProjects handles polling for the specific projects named in the configuration.
func (s *mongodbatlasreceiver) pollProjects(ctx context.Context, time timeconstraints) error {
	for _, projectCfg := range s.cfg.Projects {
		project, err := s.client.GetProject(ctx, projectCfg.Name)
		if err != nil {
			s.log.Error("error retrieving project", zap.String("projectName", projectCfg.Name), zap.Error(err))
			continue
		}

		org, err := s.client.GetOrganization(ctx, project.OrgID)
		if err != nil {
			s.log.Error("error retrieving organization from project", zap.String("projectName", projectCfg.Name), zap.Error(err))
			continue
		}

		if err := s.processProject(ctx, time, org.Name, project, projectCfg); err != nil {
			s.log.Error("error processing project", zap.String("projectID", project.ID), zap.Error(err))
		}
	}
	return nil
}

func (s *mongodbatlasreceiver) processProject(ctx context.Context, time timeconstraints, orgName string, project *mongodbatlas.Project, projectCfg *ProjectConfig) error {
	nodeClusterMap, providerMap, err := s.getNodeClusterNameMap(ctx, project.ID)
	if err != nil {
		return fmt.Errorf("error collecting clusters from project %s: %w", project.ID, err)
	}

	processes, err := s.client.Processes(ctx, project.ID)
	if err != nil {
		return fmt.Errorf("error retrieving MongoDB Atlas processes for project %s: %w", project.ID, err)
	}

	for _, process := range processes {
		clusterName := nodeClusterMap[process.UserAlias]
		providerValues := providerMap[clusterName]

		if !shouldProcessCluster(projectCfg, clusterName) {
			// Skip processing for this cluster
			continue
		}

		if err := s.extractProcessMetrics(ctx, time, orgName, project, process, clusterName, providerValues); err != nil {
			return fmt.Errorf("error when polling process metrics from MongoDB Atlas for process %s: %w", process.ID, err)
		}

		if err := s.extractProcessDatabaseMetrics(ctx, time, orgName, project, process, clusterName, providerValues); err != nil {
			return fmt.Errorf("error when polling process database metrics from MongoDB Atlas for process %s: %w", process.ID, err)
		}

		if err := s.extractProcessDiskMetrics(ctx, time, orgName, project, process, clusterName, providerValues); err != nil {
			return fmt.Errorf("error when polling process disk metrics from MongoDB Atlas for process %s: %w", process.ID, err)
		}
	}

	return nil
}

// shouldProcessCluster checks whether a given cluster should be processed based on the project configuration.
func shouldProcessCluster(projectCfg *ProjectConfig, clusterName string) bool {
	if projectCfg == nil {
		// If there is no project config, process all clusters.
		return true
	}

	_, isIncluded := projectCfg.includesByClusterName[clusterName]
	_, isExcluded := projectCfg.excludesByClusterName[clusterName]

	// Return false immediately if the cluster is excluded.
	if isExcluded {
		return false
	}

	// If IncludeClusters is empty, or the cluster is explicitly included, return true.
	return len(projectCfg.IncludeClusters) == 0 || isIncluded
}

type providerValues struct {
	RegionName   string
	ProviderName string
66 changes: 66 additions & 0 deletions receiver/mongodbatlasreceiver/receiver_test.go
@@ -71,3 +71,69 @@ func TestTimeConstraints(t *testing.T) {
		t.Run(testCase.name, testCase.run)
	}
}

func TestShouldProcessCluster(t *testing.T) {
	tests := []struct {
		name        string
		projectCfg  *ProjectConfig
		clusterName string
		want        bool
	}{
		{
			name: "included cluster should be processed",
			projectCfg: &ProjectConfig{
				IncludeClusters: []string{"Cluster1"},
			},
			clusterName: "Cluster1",
			want:        true,
		},
		{
			name: "cluster not included should not be processed",
			projectCfg: &ProjectConfig{
				IncludeClusters: []string{"Cluster1"},
			},
			clusterName: "Cluster2",
			want:        false,
		},
		{
			name: "excluded cluster should not be processed",
			projectCfg: &ProjectConfig{
				ExcludeClusters: []string{"Cluster2"},
			},
			clusterName: "Cluster2",
			want:        false,
		},
		{
			name: "cluster not excluded should be processed assuming it exists in the project",
			projectCfg: &ProjectConfig{
				ExcludeClusters: []string{"Cluster1"},
			},
			clusterName: "Cluster2",
			want:        true,
		},
		{
			name:        "cluster should be processed when no includes or excludes are set",
			projectCfg:  &ProjectConfig{},
			clusterName: "Cluster1",
			want:        true,
		},
		{
			name:        "cluster should be processed when the project config is nil",
			projectCfg:  nil,
			clusterName: "Cluster1",
			want:        true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if tt.projectCfg != nil {
				tt.projectCfg.populateIncludesAndExcludes()
			}

			if got := shouldProcessCluster(tt.projectCfg, tt.clusterName); got != tt.want {
				t.Errorf("shouldProcessCluster() = %v, want %v", got, tt.want)
			}
		})
	}
}
