Allow rules to be skipped #66

Merged
18 changes: 18 additions & 0 deletions docs/create_rules.md
@@ -90,3 +90,21 @@ def sql_has_reasonable_number_of_lines(model: Model, max_lines: int = 200) -> Ru
message=f"SQL query too long: {count_lines} lines (> {max_lines})."
)
```

### Filtering, skipping and expanding

It is possible to modify existing rules, whether built-in or custom.
A typical use case is skipping a default rule for certain models.
Remember to also disable the original rule (see [Configuration](configuration.md)), otherwise both the original and the modified rule will be evaluated.

```python
from dbt_score import Model, rule, RuleViolation, SkipRule

@rule
def schema_x_has_description(model: Model) -> RuleViolation | SkipRule | None:
"""Models in schema X should have a description."""
from dbt_score.rules.generic import has_description
if model.schema.lower() == 'x':
return SkipRule()
return has_description.evaluate(None, model)
```
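
Note that the example delegates the actual check to the built-in `has_description` rule by calling its `evaluate` method directly; passing `None` for `self` works here because the function wrapped by the `@rule` decorator does not use it.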
4 changes: 2 additions & 2 deletions src/dbt_score/__init__.py
@@ -1,6 +1,6 @@
"""Init dbt_score package."""

from dbt_score.models import Model
from dbt_score.rule import Rule, RuleViolation, Severity, rule
from dbt_score.rule import Rule, RuleViolation, Severity, SkipRule, rule

__all__ = ["Model", "Rule", "RuleViolation", "Severity", "rule"]
__all__ = ["Model", "Rule", "RuleViolation", "Severity", "SkipRule", "rule"]
7 changes: 4 additions & 3 deletions src/dbt_score/evaluation.py
@@ -6,15 +6,15 @@

from dbt_score.formatters import Formatter
from dbt_score.models import ManifestLoader, Model
from dbt_score.rule import Rule, RuleViolation
from dbt_score.rule import Rule, RuleViolation, SkipRule
from dbt_score.rule_registry import RuleRegistry
from dbt_score.scoring import Score, Scorer

# The results of a given model are stored in a dictionary, mapping rules to either:
# - None if there was no issue
# - A RuleViolation if a linting error was found
# - An Exception if the rule failed to run
ModelResultsType = dict[Type[Rule], None | RuleViolation | Exception]
ModelResultsType = dict[Type[Rule], None | RuleViolation | SkipRule | Exception]


class Evaluation:
@@ -57,7 +57,8 @@ def evaluate(self) -> None:
self.results[model] = {}
for rule in rules:
try:
result: RuleViolation | None = rule.evaluate(model, **rule.config)
result: RuleViolation | SkipRule | None = \
rule.evaluate(model, **rule.config)
except Exception as e:
self.results[model][rule.__class__] = e
else:
4 changes: 3 additions & 1 deletion src/dbt_score/formatters/human_readable_formatter.py
@@ -6,7 +6,7 @@
from dbt_score.evaluation import ModelResultsType
from dbt_score.formatters import Formatter
from dbt_score.models import Model
from dbt_score.rule import RuleViolation
from dbt_score.rule import RuleViolation, SkipRule
from dbt_score.scoring import Score


@@ -40,6 +40,8 @@ def model_evaluated(
for rule, result in results.items():
if result is None:
print(f"{self.indent}{self.label_ok} {rule.source()}")
elif isinstance(result, SkipRule):
continue
elif isinstance(result, RuleViolation):
print(
f"{self.indent}{self.label_warning} "
12 changes: 9 additions & 3 deletions src/dbt_score/rule.py
@@ -45,7 +45,13 @@ class RuleViolation:
message: str | None = None


RuleEvaluationType: TypeAlias = Callable[[Model], RuleViolation | None]
@dataclass
class SkipRule:
"""This evaluation of the rule should be skipped."""
pass


RuleEvaluationType: TypeAlias = Callable[[Model], RuleViolation | SkipRule | None]


class Rule:
@@ -85,7 +91,7 @@ def process_config(self, rule_config: RuleConfig) -> None:
) if rule_config.severity else rule_config.severity
self.config = config

def evaluate(self, model: Model) -> RuleViolation | None:
def evaluate(self, model: Model) -> RuleViolation | SkipRule | None:
"""Evaluates the rule."""
raise NotImplementedError("Subclass must implement method `evaluate`.")

@@ -154,7 +160,7 @@ def decorator_rule(
func.__doc__.split("\n")[0] if func.__doc__ else None
)

def wrapped_func(self: Rule, *args: Any, **kwargs: Any) -> RuleViolation | None:
def wrapped_func(self: Rule, *args: Any, **kwargs: Any) -> RuleViolation | SkipRule | None:
"""Wrap func to add `self`."""
return func(*args, **kwargs)

11 changes: 8 additions & 3 deletions src/dbt_score/scoring.py
@@ -9,7 +9,7 @@

if typing.TYPE_CHECKING:
from dbt_score.evaluation import ModelResultsType
from dbt_score.rule import RuleViolation, Severity
from dbt_score.rule import RuleViolation, Severity, SkipRule


@dataclass
@@ -39,7 +39,11 @@ def __init__(self, config: Config) -> None:

def score_model(self, model_results: ModelResultsType) -> Score:
"""Compute the score of a given model."""
if len(model_results) == 0:
rule_count = sum(1
for rule, result in model_results.items()
if not isinstance(result, SkipRule))

if rule_count == 0:
# No rule? No problem
score = self.max_score
elif any(
@@ -58,9 +62,10 @@ def score_model(self, model_results: ModelResultsType) -> Score:
if isinstance(result, RuleViolation) # Either 0/3, 1/3 or 2/3
else self.score_cardinality # 3/3
for rule, result in model_results.items()
if not isinstance(result, SkipRule)
]
)
/ (self.score_cardinality * len(model_results))
/ (self.score_cardinality * rule_count)
* self.max_score
)

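For intuition about the adjusted denominator: skipped rules are excluded from both the numerator and the denominator, so they neither raise nor lower a model's score. A minimal standalone sketch of the arithmetic (plain Python, not dbt-score code; the 3-point cardinality, the 10-point maximum, and the 1-of-3 weight for a medium-severity violation are inferred from the comments and tests in this diff):

```python
# One skipped rule plus one medium-severity violation: the skipped rule
# contributes no points and is not counted in the denominator.
score_cardinality = 3  # points available per evaluated rule
max_score = 10.0

rule_points = [1]              # medium violation is worth 1 of 3 points
rule_count = len(rule_points)  # only non-skipped rules are counted

score = sum(rule_points) / (score_cardinality * rule_count) * max_score
print(round(score, 2))  # 3.33, matching test_scorer_skipping_rule below
```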
15 changes: 14 additions & 1 deletion tests/conftest.py
@@ -4,7 +4,7 @@
from pathlib import Path
from typing import Any, Type

from dbt_score import Model, Rule, RuleViolation, Severity, rule
from dbt_score import Model, Rule, RuleViolation, Severity, SkipRule, rule
from dbt_score.config import Config
from dbt_score.models import ManifestLoader
from pytest import fixture
@@ -210,3 +210,16 @@ def rule_error(model: Model) -> RuleViolation | None:
raise Exception("Oh noes, something went wrong")

return rule_error


@fixture
def rule_skippable() -> Type[Rule]:
"""An example rule that may skip."""

@rule
def rule_with_skip(model: Model) -> RuleViolation | SkipRule | None:
"""Skips for model1, passes for model2."""
if model.name == "model1":
return SkipRule()

return rule_with_skip
3 changes: 2 additions & 1 deletion tests/formatters/test_ascii_formatter.py
@@ -3,6 +3,7 @@
from typing import Type

from dbt_score.formatters.ascii_formatter import ASCIIFormatter
from dbt_score.evaluation import ModelResultsType
from dbt_score.rule import Rule, RuleViolation
from dbt_score.scoring import Score

@@ -18,7 +19,7 @@ def test_ascii_formatter_model(
):
"""Ensure the formatter doesn't write anything after model evaluation."""
formatter = ASCIIFormatter(manifest_loader=manifest_loader, config=default_config)
results: dict[Type[Rule], RuleViolation | Exception | None] = {
results: ModelResultsType = {
rule_severity_low: None,
rule_severity_medium: Exception("Oh noes"),
rule_severity_critical: RuleViolation("Error"),
7 changes: 4 additions & 3 deletions tests/formatters/test_human_readable_formatter.py
@@ -3,6 +3,7 @@
from typing import Type

from dbt_score.formatters.human_readable_formatter import HumanReadableFormatter
from dbt_score.evaluation import ModelResultsType
from dbt_score.rule import Rule, RuleViolation
from dbt_score.scoring import Score

@@ -20,7 +21,7 @@ def test_human_readable_formatter_model(
formatter = HumanReadableFormatter(
manifest_loader=manifest_loader, config=default_config
)
results: dict[Type[Rule], RuleViolation | Exception | None] = {
results: ModelResultsType = {
rule_severity_low: None,
rule_severity_medium: Exception("Oh noes"),
rule_severity_critical: RuleViolation("Error"),
@@ -59,7 +60,7 @@ def test_human_readable_formatter_low_model_score(
formatter = HumanReadableFormatter(
manifest_loader=manifest_loader, config=default_config
)
results: dict[Type[Rule], RuleViolation | Exception | None] = {
results: ModelResultsType = {
rule_severity_critical: RuleViolation("Error"),
}
formatter.model_evaluated(model1, results, Score(0.0, "🚧"))
@@ -90,7 +91,7 @@ def test_human_readable_formatter_low_project_score(
formatter = HumanReadableFormatter(
manifest_loader=manifest_loader, config=default_config
)
results: dict[Type[Rule], RuleViolation | Exception | None] = {
results: ModelResultsType = {
rule_severity_critical: RuleViolation("Error"),
}
formatter.model_evaluated(model1, results, Score(10.0, "🥇"))
7 changes: 4 additions & 3 deletions tests/formatters/test_manifest_formatter.py
@@ -4,6 +4,7 @@
from typing import Type

from dbt_score.formatters.manifest_formatter import ManifestFormatter
from dbt_score.evaluation import ModelResultsType
from dbt_score.rule import Rule, RuleViolation
from dbt_score.scoring import Score

@@ -21,7 +22,7 @@ def test_manifest_formatter_model(
formatter = ManifestFormatter(
manifest_loader=manifest_loader, config=default_config
)
results = {
results: ModelResultsType = {
rule_severity_low: None,
rule_severity_medium: Exception("Oh noes"),
rule_severity_critical: RuleViolation("Error"),
@@ -45,12 +46,12 @@ def test_manifest_formatter_project(
formatter = ManifestFormatter(
manifest_loader=manifest_loader, config=default_config
)
result1: dict[Type[Rule], RuleViolation | Exception | None] = {
result1: ModelResultsType = {
rule_severity_low: None,
rule_severity_medium: Exception("Oh noes"),
rule_severity_critical: RuleViolation("Error"),
}
result2: dict[Type[Rule], RuleViolation | Exception | None] = {
result2: ModelResultsType = {
rule_severity_low: None,
rule_severity_medium: None,
rule_severity_critical: None,
29 changes: 28 additions & 1 deletion tests/test_evaluation.py
@@ -5,7 +5,7 @@
from dbt_score.config import Config
from dbt_score.evaluation import Evaluation
from dbt_score.models import ManifestLoader
from dbt_score.rule import RuleViolation
from dbt_score.rule import RuleViolation, SkipRule
from dbt_score.rule_registry import RuleRegistry
from dbt_score.scoring import Score

@@ -179,3 +179,30 @@ def test_evaluation_rule_with_config(
)
assert evaluation.results[model1][rule_with_config] is not None
assert evaluation.results[model2][rule_with_config] is None


def test_evaluation_with_skip(manifest_path, default_config, rule_skippable):
"""Test rule set to skip."""
manifest_loader = ManifestLoader(manifest_path)
mock_formatter = Mock()
mock_scorer = Mock()

rule_registry = RuleRegistry(default_config)
rule_registry._add_rule(rule_skippable)

# Ensure we get a valid Score object from the Mock
mock_scorer.score_model.return_value = Score(10, "🥇")

evaluation = Evaluation(
rule_registry=rule_registry,
manifest_loader=manifest_loader,
formatter=mock_formatter,
scorer=mock_scorer,
)
evaluation.evaluate()

model1 = manifest_loader.models[0]
model2 = manifest_loader.models[1]

assert isinstance(evaluation.results[model1][rule_skippable], SkipRule)
assert evaluation.results[model2][rule_skippable] is None
23 changes: 22 additions & 1 deletion tests/test_scoring.py
@@ -1,7 +1,7 @@
"""Unit tests for the scoring module."""


from dbt_score.rule import RuleViolation
from dbt_score.rule import RuleViolation, SkipRule
from dbt_score.scoring import Score, Scorer


@@ -115,6 +115,27 @@ def test_scorer_model_multiple_rules(
)


def test_scorer_skipping_rule(default_config, rule_skippable, rule_severity_medium):
"""Test scorer with a model that skips."""
scorer = Scorer(config=default_config)
scorer.score_model({rule_skippable: SkipRule()})

assert scorer.score_model({rule_skippable: SkipRule()}).value == 10.0

assert (
round(
scorer.score_model(
{
rule_skippable: SkipRule(),
rule_severity_medium: RuleViolation("error")
}
).value,
2,
)
== 3.33
)


def test_scorer_aggregate_empty(default_config):
"""Test scorer aggregation with no results."""
scorer = Scorer(config=default_config)