diff --git a/CHANGELOG.md b/CHANGELOG.md index 16a21a1696c..608caed3dc0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ### Features - Capture changes to macros in `state:modified`. Introduce new `state:` sub-selectors: `modified.body`, `modified.configs`, `modified.persisted_descriptions`, `modified.relation`, `modified.macros` ([#2704](https://github.com/dbt-labs/dbt/issues/2704), [#3278](https://github.com/dbt-labs/dbt/issues/3278), [#3559](https://github.com/dbt-labs/dbt/issues/3559)) +- Enable setting configs in schema files for models, seeds, snapshots, analyses, tests ([#2401](https://github.com/dbt-labs/dbt/issues/2401), [#3616](https://github.com/dbt-labs/dbt/pull/3616)) ### Fixes @@ -15,6 +16,7 @@ ## dbt 0.21.0b1 (August 03, 2021) + ### Breaking changes - Add full node selection to source freshness command and align selection syntax with other tasks (`dbt source freshness --select source_name` --> `dbt source freshness --select source:souce_name`) and rename `dbt source snapshot-freshness` -> `dbt source freshness`. 
([#2987](https://github.com/dbt-labs/dbt/issues/2987), [#3554](https://github.com/dbt-labs/dbt/pull/3554)) diff --git a/core/dbt/context/context_config.py b/core/dbt/context/context_config.py index 07f93224324..3dd3779e23b 100644 --- a/core/dbt/context/context_config.py +++ b/core/dbt/context/context_config.py @@ -125,6 +125,7 @@ def calculate_node_config( resource_type: NodeType, project_name: str, base: bool, + patch_config_dict: Dict[str, Any] = None ) -> BaseConfig: own_config = self.get_node_project(project_name) @@ -134,6 +135,12 @@ def calculate_node_config( for fqn_config in project_configs: result = self._update_from_config(result, fqn_config) + # When schema files patch config, it has lower precedence than + # config in the models (config_call_dict), so we add the patch_config_dict + # before the config_call_dict + if patch_config_dict: + result = self._update_from_config(result, patch_config_dict) + # config_calls are created in the 'experimental' model parser and # the ParseConfigObject (via add_config_call) result = self._update_from_config(result, config_call_dict) @@ -153,6 +160,7 @@ def calculate_node_config_dict( resource_type: NodeType, project_name: str, base: bool, + patch_config_dict: Dict[str, Any], ) -> Dict[str, Any]: ... 
@@ -192,6 +200,7 @@ def calculate_node_config_dict( resource_type: NodeType, project_name: str, base: bool, + patch_config_dict: dict = None ) -> Dict[str, Any]: config = self.calculate_node_config( config_call_dict=config_call_dict, @@ -199,6 +208,7 @@ def calculate_node_config_dict( resource_type=resource_type, project_name=project_name, base=base, + patch_config_dict=patch_config_dict ) finalized = config.finalize_and_validate() return finalized.to_dict(omit_none=True) @@ -215,6 +225,7 @@ def calculate_node_config_dict( resource_type: NodeType, project_name: str, base: bool, + patch_config_dict: dict = None ) -> Dict[str, Any]: return self.calculate_node_config( config_call_dict=config_call_dict, @@ -222,6 +233,7 @@ def calculate_node_config_dict( resource_type=resource_type, project_name=project_name, base=base, + patch_config_dict=patch_config_dict ) def initial_result( @@ -284,6 +296,7 @@ def build_config_dict( base: bool = False, *, rendered: bool = True, + patch_config_dict: dict = None ) -> Dict[str, Any]: if rendered: src = ContextConfigGenerator(self._active_project) @@ -296,4 +309,5 @@ def build_config_dict( resource_type=self._resource_type, project_name=self._project_name, base=base, + patch_config_dict=patch_config_dict ) diff --git a/core/dbt/context/providers.py b/core/dbt/context/providers.py index 4599a85b07f..6f20d287f49 100644 --- a/core/dbt/context/providers.py +++ b/core/dbt/context/providers.py @@ -1243,7 +1243,7 @@ class ModelContext(ProviderContext): @contextproperty def pre_hooks(self) -> List[Dict[str, Any]]: - if isinstance(self.model, ParsedSourceDefinition): + if self.model.resource_type in [NodeType.Source, NodeType.Test]: return [] return [ h.to_dict(omit_none=True) for h in self.model.config.pre_hook @@ -1251,7 +1251,7 @@ def pre_hooks(self) -> List[Dict[str, Any]]: @contextproperty def post_hooks(self) -> List[Dict[str, Any]]: - if isinstance(self.model, ParsedSourceDefinition): + if self.model.resource_type in [NodeType.Source, 
NodeType.Test]: return [] return [ h.to_dict(omit_none=True) for h in self.model.config.post_hook diff --git a/core/dbt/contracts/graph/compiled.py b/core/dbt/contracts/graph/compiled.py index 701528f3312..b0869bb4f47 100644 --- a/core/dbt/contracts/graph/compiled.py +++ b/core/dbt/contracts/graph/compiled.py @@ -109,7 +109,9 @@ class CompiledSnapshotNode(CompiledNode): @dataclass class CompiledDataTestNode(CompiledNode): resource_type: NodeType = field(metadata={'restrict': [NodeType.Test]}) - config: TestConfig = field(default_factory=TestConfig) + # Was not able to make mypy happy and keep the code working. We need to + # refactor the various configs. + config: TestConfig = field(default_factory=TestConfig) # type:ignore @dataclass @@ -117,7 +119,9 @@ class CompiledSchemaTestNode(CompiledNode, HasTestMetadata): # keep this in sync with ParsedSchemaTestNode! resource_type: NodeType = field(metadata={'restrict': [NodeType.Test]}) column_name: Optional[str] = None - config: TestConfig = field(default_factory=TestConfig) + # Was not able to make mypy happy and keep the code working. We need to + # refactor the various configs. 
+ config: TestConfig = field(default_factory=TestConfig) # type:ignore def same_contents(self, other) -> bool: if other is None: diff --git a/core/dbt/contracts/graph/manifest.py b/core/dbt/contracts/graph/manifest.py index 6b21fe4bf10..ad606fed1a9 100644 --- a/core/dbt/contracts/graph/manifest.py +++ b/core/dbt/contracts/graph/manifest.py @@ -14,7 +14,7 @@ CompileResultNode, ManifestNode, NonSourceCompiledNode, GraphMemberNode ) from dbt.contracts.graph.parsed import ( - ParsedMacro, ParsedDocumentation, ParsedNodePatch, ParsedMacroPatch, + ParsedMacro, ParsedDocumentation, ParsedSourceDefinition, ParsedExposure, HasUniqueID, UnpatchedSourceDefinition, ManifestNodes ) @@ -26,9 +26,7 @@ from dbt.dataclass_schema import dbtClassMixin from dbt.exceptions import ( CompilationException, - raise_duplicate_resource_name, raise_compiler_error, warn_or_error, - raise_duplicate_patch_name, - raise_duplicate_macro_patch_name, raise_duplicate_source_patch_name, + raise_duplicate_resource_name, raise_compiler_error, ) from dbt.helper_types import PathSet from dbt.logger import GLOBAL_LOGGER as logger @@ -718,60 +716,6 @@ def get_resource_fqns(self) -> Mapping[str, PathSet]: resource_fqns[resource_type_plural].add(tuple(resource.fqn)) return resource_fqns - # This is called by 'parse_patch' in the NodePatchParser - def add_patch( - self, source_file: SchemaSourceFile, patch: ParsedNodePatch, - ) -> None: - if patch.yaml_key in ['models', 'seeds', 'snapshots']: - unique_id = self.ref_lookup.get_unique_id(patch.name, None) - elif patch.yaml_key == 'analyses': - unique_id = self.analysis_lookup.get_unique_id(patch.name, None) - else: - raise dbt.exceptions.InternalException( - f'Unexpected yaml_key {patch.yaml_key} for patch in ' - f'file {source_file.path.original_file_path}' - ) - if unique_id is None: - # This will usually happen when a node is disabled - return - - # patches can't be overwritten - node = self.nodes.get(unique_id) - if node: - if node.patch_path: - 
package_name, existing_file_path = node.patch_path.split('://') - raise_duplicate_patch_name(patch, existing_file_path) - source_file.append_patch(patch.yaml_key, unique_id) - node.patch(patch) - - def add_macro_patch( - self, source_file: SchemaSourceFile, patch: ParsedMacroPatch, - ) -> None: - # macros are fully namespaced - unique_id = f'macro.{patch.package_name}.{patch.name}' - macro = self.macros.get(unique_id) - if not macro: - warn_or_error( - f'WARNING: Found documentation for macro "{patch.name}" ' - f'which was not found' - ) - return - if macro.patch_path: - package_name, existing_file_path = macro.patch_path.split('://') - raise_duplicate_macro_patch_name(patch, existing_file_path) - source_file.macro_patches[patch.name] = unique_id - macro.patch(patch) - - def add_source_patch( - self, source_file: SchemaSourceFile, patch: SourcePatch, - ) -> None: - # source patches must be unique - key = (patch.overrides, patch.name) - if key in self.source_patches: - raise_duplicate_source_patch_name(patch, self.source_patches[key]) - self.source_patches[key] = patch - source_file.source_patches.append(key) - def get_used_schemas(self, resource_types=None): return frozenset({ (node.database, node.schema) for node in diff --git a/core/dbt/contracts/graph/model_config.py b/core/dbt/contracts/graph/model_config.py index d2ddf8aa9c2..7a5b621fbf2 100644 --- a/core/dbt/contracts/graph/model_config.py +++ b/core/dbt/contracts/graph/model_config.py @@ -268,9 +268,10 @@ def same_contents( return True # This is used in 'add_config_call' to created the combined config_call_dict. 
+ # 'meta' moved here from node mergebehavior = { "append": ['pre-hook', 'pre_hook', 'post-hook', 'post_hook', 'tags'], - "update": ['quoting', 'column_types'], + "update": ['quoting', 'column_types', 'meta'], } @classmethod @@ -355,10 +356,38 @@ class SourceConfig(BaseConfig): @dataclass -class NodeConfig(BaseConfig): +class NodeAndTestConfig(BaseConfig): + enabled: bool = True + # these fields are included in serialized output, but are not part of + # config comparison (they are part of database_representation) + alias: Optional[str] = field( + default=None, + metadata=CompareBehavior.Exclude.meta(), + ) + schema: Optional[str] = field( + default=None, + metadata=CompareBehavior.Exclude.meta(), + ) + database: Optional[str] = field( + default=None, + metadata=CompareBehavior.Exclude.meta(), + ) + tags: Union[List[str], str] = field( + default_factory=list_str, + metadata=metas(ShowBehavior.Hide, + MergeBehavior.Append, + CompareBehavior.Exclude), + ) + meta: Dict[str, Any] = field( + default_factory=dict, + metadata=MergeBehavior.Update.meta(), + ) + + +@dataclass +class NodeConfig(NodeAndTestConfig): # Note: if any new fields are added with MergeBehavior, also update the # 'mergebehavior' dictionary - enabled: bool = True materialized: str = 'view' persist_docs: Dict[str, Any] = field(default_factory=dict) post_hook: List[Hook] = field( @@ -369,11 +398,6 @@ class NodeConfig(BaseConfig): default_factory=list, metadata=MergeBehavior.Append.meta(), ) - # this only applies for config v1, so it doesn't participate in comparison - vars: Dict[str, Any] = field( - default_factory=dict, - metadata=metas(CompareBehavior.Exclude, MergeBehavior.Update), - ) quoting: Dict[str, Any] = field( default_factory=dict, metadata=MergeBehavior.Update.meta(), @@ -384,26 +408,6 @@ class NodeConfig(BaseConfig): default_factory=dict, metadata=MergeBehavior.Update.meta(), ) - # these fields are included in serialized output, but are not part of - # config comparison (they are part of 
database_representation) - alias: Optional[str] = field( - default=None, - metadata=CompareBehavior.Exclude.meta(), - ) - schema: Optional[str] = field( - default=None, - metadata=CompareBehavior.Exclude.meta(), - ) - database: Optional[str] = field( - default=None, - metadata=CompareBehavior.Exclude.meta(), - ) - tags: Union[List[str], str] = field( - default_factory=list_str, - metadata=metas(ShowBehavior.Hide, - MergeBehavior.Append, - CompareBehavior.Exclude), - ) full_refresh: Optional[bool] = None on_schema_change: Optional[str] = 'ignore' @@ -447,7 +451,8 @@ class SeedConfig(NodeConfig): @dataclass -class TestConfig(NodeConfig): +class TestConfig(NodeAndTestConfig): + # this is repeated because of a different default schema: Optional[str] = field( default='dbt_test__audit', metadata=CompareBehavior.Exclude.meta(), diff --git a/core/dbt/contracts/graph/parsed.py b/core/dbt/contracts/graph/parsed.py index 2220ffaf9e9..c30aa4c6f72 100644 --- a/core/dbt/contracts/graph/parsed.py +++ b/core/dbt/contracts/graph/parsed.py @@ -148,6 +148,7 @@ def patch(self, patch: 'ParsedNodePatch'): """Given a ParsedNodePatch, add the new information to the node.""" # explicitly pick out the parts to update so we don't inadvertently # step on the model name or anything + # Note: config should already be updated self.patch_path: Optional[str] = patch.file_id # update created_at so process_docs will run in partial parsing self.created_at = int(time.time()) @@ -166,9 +167,6 @@ def patch(self, patch: 'ParsedNodePatch'): def get_materialization(self): return self.config.materialized - def local_vars(self): - return self.config.vars - @dataclass class ParsedNodeMandatory( @@ -203,6 +201,7 @@ class ParsedNodeDefaults(ParsedNodeMandatory): deferred: bool = False unrendered_config: Dict[str, Any] = field(default_factory=dict) created_at: int = field(default_factory=lambda: int(time.time())) + config_call_dict: Dict[str, Any] = field(default_factory=dict) def write_node(self, target_path: 
str, subdirectory: str, payload: str): if (os.path.basename(self.path) == @@ -229,6 +228,11 @@ class ParsedNode(ParsedNodeDefaults, ParsedNodeMixins, SerializableType): def _serialize(self): return self.to_dict() + def __post_serialize__(self, dct): + if 'config_call_dict' in dct: + del dct['config_call_dict'] + return dct + @classmethod def _deserialize(cls, dct: Dict[str, int]): # The serialized ParsedNodes do not differ from each other @@ -258,10 +262,16 @@ def _deserialize(cls, dct: Dict[str, int]): return cls.from_dict(dct) def _persist_column_docs(self) -> bool: - return bool(self.config.persist_docs.get('columns')) + if hasattr(self.config, 'persist_docs'): + assert isinstance(self.config, NodeConfig) + return bool(self.config.persist_docs.get('columns')) + return False def _persist_relation_docs(self) -> bool: - return bool(self.config.persist_docs.get('relation')) + if hasattr(self.config, 'persist_docs'): + assert isinstance(self.config, NodeConfig) + return bool(self.config.persist_docs.get('relation')) + return False def same_body(self: T, other: T) -> bool: return self.raw_sql == other.raw_sql @@ -411,7 +421,9 @@ class HasTestMetadata(dbtClassMixin): @dataclass class ParsedDataTestNode(ParsedNode): resource_type: NodeType = field(metadata={'restrict': [NodeType.Test]}) - config: TestConfig = field(default_factory=TestConfig) + # Was not able to make mypy happy and keep the code working. We need to + # refactor the various configs. + config: TestConfig = field(default_factory=TestConfig) # type: ignore @dataclass @@ -419,7 +431,9 @@ class ParsedSchemaTestNode(ParsedNode, HasTestMetadata): # keep this in sync with CompiledSchemaTestNode! resource_type: NodeType = field(metadata={'restrict': [NodeType.Test]}) column_name: Optional[str] = None - config: TestConfig = field(default_factory=TestConfig) + # Was not able to make mypy happy and keep the code working. We need to + # refactor the various configs. 
+ config: TestConfig = field(default_factory=TestConfig) # type: ignore def same_contents(self, other) -> bool: if other is None: @@ -456,6 +470,7 @@ class ParsedPatch(HasYamlMetadata, Replaceable): description: str meta: Dict[str, Any] docs: Docs + config: Dict[str, Any] # The parsed node update is only the 'patch', not the test. The test became a @@ -487,9 +502,6 @@ class ParsedMacro(UnparsedBaseNode, HasUniqueID): arguments: List[MacroArgument] = field(default_factory=list) created_at: int = field(default_factory=lambda: int(time.time())) - def local_vars(self): - return {} - def patch(self, patch: ParsedMacroPatch): self.patch_path: Optional[str] = patch.file_id self.description = patch.description diff --git a/core/dbt/contracts/graph/unparsed.py b/core/dbt/contracts/graph/unparsed.py index 477c06fc672..673ca1476c5 100644 --- a/core/dbt/contracts/graph/unparsed.py +++ b/core/dbt/contracts/graph/unparsed.py @@ -126,12 +126,17 @@ def file_id(self): @dataclass -class UnparsedAnalysisUpdate(HasColumnDocs, HasDocs, HasYamlMetadata): +class HasConfig(): + config: Dict[str, Any] = field(default_factory=dict) + + +@dataclass +class UnparsedAnalysisUpdate(HasConfig, HasColumnDocs, HasDocs, HasYamlMetadata): pass @dataclass -class UnparsedNodeUpdate(HasColumnTests, HasTests, HasYamlMetadata): +class UnparsedNodeUpdate(HasConfig, HasColumnTests, HasTests, HasYamlMetadata): quote_columns: Optional[bool] = None @@ -143,7 +148,7 @@ class MacroArgument(dbtClassMixin): @dataclass -class UnparsedMacroUpdate(HasDocs, HasYamlMetadata): +class UnparsedMacroUpdate(HasConfig, HasDocs, HasYamlMetadata): arguments: List[MacroArgument] = field(default_factory=list) @@ -261,6 +266,7 @@ class UnparsedSourceDefinition(dbtClassMixin, Replaceable): loaded_at_field: Optional[str] = None tables: List[UnparsedSourceTableDefinition] = field(default_factory=list) tags: List[str] = field(default_factory=list) + config: Dict[str, Any] = field(default_factory=dict) @property def yaml_key(self) -> 
'str': diff --git a/core/dbt/parser/base.py b/core/dbt/parser/base.py index 8b65d2b3b00..e02ad1d2c02 100644 --- a/core/dbt/parser/base.py +++ b/core/dbt/parser/base.py @@ -256,9 +256,7 @@ def _context_for( parsed_node, self.root_project, self.manifest, config ) - def render_with_context( - self, parsed_node: IntermediateNode, config: ContextConfig - ) -> None: + def render_with_context(self, parsed_node: IntermediateNode, config: ContextConfig): # Given the parsed node and a ContextConfig to use during parsing, # render the node's sql wtih macro capture enabled. # Note: this mutates the config object when config calls are rendered. @@ -273,11 +271,12 @@ def render_with_context( get_rendered( parsed_node.raw_sql, context, parsed_node, capture_macros=True ) + return context # This is taking the original config for the node, converting it to a dict, # updating the config with new config passed in, then re-creating the # config from the dict in the node. - def update_parsed_node_config( + def update_parsed_node_config_dict( self, parsed_node: IntermediateNode, config_dict: Dict[str, Any] ) -> None: # Overwrite node config @@ -294,28 +293,50 @@ def update_parsed_node_name( self._update_node_schema(parsed_node, config_dict) self._update_node_alias(parsed_node, config_dict) - def update_parsed_node( - self, parsed_node: IntermediateNode, config: ContextConfig + def update_parsed_node_config( + self, parsed_node: IntermediateNode, config: ContextConfig, + context=None, patch_config_dict=None ) -> None: """Given the ContextConfig used for parsing and the parsed node, generate and set the true values to use, overriding the temporary parse values set in _build_intermediate_parsed_node. 
""" - config_dict = config.build_config_dict() - # Set tags on node provided in config blocks + # build_config_dict takes the config_call_dict in the ContextConfig object + # and calls calculate_node_config to combine dbt_project configs and + # config calls from SQL files + config_dict = config.build_config_dict(patch_config_dict=patch_config_dict) + + # Set tags on node provided in config blocks. Tags are additive, so even if + # config has been built before, we don't have to reset tags in the parsed_node. model_tags = config_dict.get('tags', []) - parsed_node.tags.extend(model_tags) + for tag in model_tags: + if tag not in parsed_node.tags: + parsed_node.tags.append(tag) + # If we have meta in the config, copy to node level, for backwards + # compatibility with earlier node-only config. + if 'meta' in config_dict and config_dict['meta']: + parsed_node.meta = config_dict['meta'] + + # unrendered_config is used to compare the original database/schema/alias + # values and to handle 'same_config' and 'same_contents' calls parsed_node.unrendered_config = config.build_config_dict( rendered=False ) + parsed_node.config_call_dict = config._config_call_dict + # do this once before we parse the node database/schema/alias, so # parsed_node.config is what it would be if they did nothing - self.update_parsed_node_config(parsed_node, config_dict) + self.update_parsed_node_config_dict(parsed_node, config_dict) + # This updates the node database/schema/alias self.update_parsed_node_name(parsed_node, config_dict) + # tests don't have hooks + if parsed_node.resource_type == NodeType.Test: + return + # at this point, we've collected our hooks. Use the node context to # render each hook and collect refs/sources hooks = list(itertools.chain(parsed_node.config.pre_hook, @@ -323,9 +344,8 @@ def update_parsed_node( # skip context rebuilding if there aren't any hooks if not hooks: return - # we could cache the original context from parsing this node. 
Is that - # worth the cost in memory/complexity? - context = self._context_for(parsed_node, config) + if not context: + context = self._context_for(parsed_node, config) for hook in hooks: get_rendered(hook.sql, context, parsed_node, capture_macros=True) @@ -357,8 +377,8 @@ def render_update( self, node: IntermediateNode, config: ContextConfig ) -> None: try: - self.render_with_context(node, config) - self.update_parsed_node(node, config) + context = self.render_with_context(node, config) + self.update_parsed_node_config(node, config, context=context) except ValidationError as exc: # we got a ValidationError - probably bad types in config() msg = validator_error_message(exc) diff --git a/core/dbt/parser/models.py b/core/dbt/parser/models.py index 4e179fa5859..ac7a5d0b313 100644 --- a/core/dbt/parser/models.py +++ b/core/dbt/parser/models.py @@ -126,7 +126,7 @@ def render_update( # this uses the updated config to set all the right things in the node. # if there are hooks present, it WILL render jinja. Will need to change # when the experimental parser supports hooks - self.update_parsed_node(node, config) + self.update_parsed_node_config(node, config) # update the unrendered config with values from the file. 
# values from yaml files are in there already diff --git a/core/dbt/parser/schema_test_builders.py b/core/dbt/parser/schema_test_builders.py index 87a279fc4d0..7d1ddfbace5 100644 --- a/core/dbt/parser/schema_test_builders.py +++ b/core/dbt/parser/schema_test_builders.py @@ -190,9 +190,9 @@ class TestBuilder(Generic[Testable]): r'(?P([a-zA-Z_][0-9a-zA-Z_]*))' ) # kwargs representing test configs - MODIFIER_ARGS = ( + CONFIG_ARGS = ( 'severity', 'tags', 'enabled', 'where', 'limit', 'warn_if', 'error_if', - 'fail_calc', 'store_failures' + 'fail_calc', 'store_failures', 'meta', 'database', 'schema', 'alias', ) def __init__( @@ -224,13 +224,24 @@ def __init__( groups = match.groupdict() self.name: str = groups['test_name'] self.namespace: str = groups['test_namespace'] - self.modifiers: Dict[str, Any] = {} - for key in self.MODIFIER_ARGS: + self.config: Dict[str, Any] = {} + + for key in self.CONFIG_ARGS: value = self.args.pop(key, None) + # 'modifier' config could be either top level arg or in config + if value and 'config' in self.args and key in self.args['config']: + raise_compiler_error( + 'Test cannot have the same key at the top-level and in config' + ) + if not value and 'config' in self.args: + value = self.args['config'].pop(key, None) if isinstance(value, str): value = get_rendered(value, render_ctx, native=True) if value is not None: - self.modifiers[key] = value + self.config[key] = value + + if 'config' in self.args: + del self.args['config'] if self.namespace is not None: self.package_name = self.namespace @@ -240,8 +251,8 @@ def __init__( self.fqn_name: str = fqn_name # use hashed name as alias if too long - if compiled_name != fqn_name: - self.modifiers['alias'] = compiled_name + if compiled_name != fqn_name and 'alias' not in self.config: + self.config['alias'] = compiled_name def _bad_type(self) -> TypeError: return TypeError('invalid target type "{}"'.format(type(self.target))) @@ -282,15 +293,15 @@ def extract_test_args(test, name=None) -> 
Tuple[str, Dict[str, Any]]: @property def enabled(self) -> Optional[bool]: - return self.modifiers.get('enabled') + return self.config.get('enabled') @property def alias(self) -> Optional[str]: - return self.modifiers.get('alias') + return self.config.get('alias') @property def severity(self) -> Optional[str]: - sev = self.modifiers.get('severity') + sev = self.config.get('severity') if sev: return sev.upper() else: @@ -298,30 +309,72 @@ def severity(self) -> Optional[str]: @property def store_failures(self) -> Optional[bool]: - return self.modifiers.get('store_failures') + return self.config.get('store_failures') @property def where(self) -> Optional[str]: - return self.modifiers.get('where') + return self.config.get('where') @property def limit(self) -> Optional[int]: - return self.modifiers.get('limit') + return self.config.get('limit') @property def warn_if(self) -> Optional[str]: - return self.modifiers.get('warn_if') + return self.config.get('warn_if') @property def error_if(self) -> Optional[str]: - return self.modifiers.get('error_if') + return self.config.get('error_if') @property def fail_calc(self) -> Optional[str]: - return self.modifiers.get('fail_calc') + return self.config.get('fail_calc') + + @property + def meta(self) -> Optional[dict]: + return self.config.get('meta') + + @property + def database(self) -> Optional[str]: + return self.config.get('database') + + @property + def schema(self) -> Optional[str]: + return self.config.get('schema') + + def get_static_config(self): + config = {} + if self.alias is not None: + config['alias'] = self.alias + if self.severity is not None: + config['severity'] = self.severity + if self.enabled is not None: + config['enabled'] = self.enabled + if self.where is not None: + config['where'] = self.where + if self.limit is not None: + config['limit'] = self.limit + if self.warn_if is not None: + config['warn_if'] = self.warn_if + if self.error_if is not None: + config['error_if'] = self.error_if + if self.fail_calc 
is not None: + config['fail_calc'] = self.fail_calc + if self.store_failures is not None: + config['store_failures'] = self.store_failures + if self.meta is not None: + config['meta'] = self.meta + if self.database is not None: + config['database'] = self.database + if self.schema is not None: + config['schema'] = self.schema + if self.alias is not None: + config['alias'] = self.alias + return config def tags(self) -> List[str]: - tags = self.modifiers.get('tags', []) + tags = self.config.get('tags', []) if isinstance(tags, str): tags = [tags] if not isinstance(tags, list): @@ -360,7 +413,7 @@ def construct_config(self) -> str: else str(value) ) for key, value - in self.modifiers.items() + in self.config.items() ]) if configs: return f"{{{{ config({configs}) }}}}" diff --git a/core/dbt/parser/schemas.py b/core/dbt/parser/schemas.py index 9a097817d63..4f1ad038d28 100644 --- a/core/dbt/parser/schemas.py +++ b/core/dbt/parser/schemas.py @@ -22,8 +22,7 @@ generate_parse_exposure, generate_test_context ) from dbt.context.macro_resolver import MacroResolver -from dbt.contracts.files import FileHash -from dbt.contracts.graph.manifest import SchemaSourceFile +from dbt.contracts.files import FileHash, SchemaSourceFile from dbt.contracts.graph.parsed import ( ParsedNodePatch, ColumnInfo, @@ -47,7 +46,10 @@ from dbt.exceptions import ( validator_error_message, JSONValidationException, raise_invalid_schema_yml_version, ValidationException, - CompilationException, + CompilationException, raise_duplicate_patch_name, + raise_duplicate_macro_patch_name, InternalException, + raise_duplicate_source_patch_name, + warn_or_error, ) from dbt.node_types import NodeType from dbt.parser.base import SimpleParser @@ -314,8 +316,8 @@ def _parse_generic_test( # is not necessarily this package's name fqn = self.get_fqn(fqn_path, builder.fqn_name) - # this is the config that is used in render_update - config = self.initial_config(fqn) + # this is the ContextConfig that is used in render_update + 
config: ContextConfig = self.initial_config(fqn) metadata = { 'namespace': builder.namespace, @@ -356,37 +358,10 @@ def render_test_update(self, node, config, builder): node.depends_on.add_macro(macro_unique_id) if (macro_unique_id in ['macro.dbt.test_not_null', 'macro.dbt.test_unique']): - self.update_parsed_node(node, config) - # manually set configs - # note: this does not respect generate_alias_name() macro - if builder.alias is not None: - node.unrendered_config['alias'] = builder.alias - node.config['alias'] = builder.alias - node.alias = builder.alias - if builder.severity is not None: - node.unrendered_config['severity'] = builder.severity - node.config['severity'] = builder.severity - if builder.enabled is not None: - node.unrendered_config['enabled'] = builder.enabled - node.config['enabled'] = builder.enabled - if builder.where is not None: - node.unrendered_config['where'] = builder.where - node.config['where'] = builder.where - if builder.limit is not None: - node.unrendered_config['limit'] = builder.limit - node.config['limit'] = builder.limit - if builder.warn_if is not None: - node.unrendered_config['warn_if'] = builder.warn_if - node.config['warn_if'] = builder.warn_if - if builder.error_if is not None: - node.unrendered_config['error_if'] = builder.error_if - node.config['error_if'] = builder.error_if - if builder.fail_calc is not None: - node.unrendered_config['fail_calc'] = builder.fail_calc - node.config['fail_calc'] = builder.fail_calc - if builder.store_failures is not None: - node.unrendered_config['store_failures'] = builder.store_failures - node.config['store_failures'] = builder.store_failures + config_call_dict = builder.get_static_config() + config._config_call_dict = config_call_dict + # This sets the config from dbt_project + self.update_parsed_node_config(node, config) # source node tests are processed at patch_source time if isinstance(builder.target, UnpatchedSourceDefinition): sources = [builder.target.fqn[-2], 
builder.target.fqn[-1]] @@ -406,7 +381,7 @@ def render_test_update(self, node, config, builder): get_rendered( node.raw_sql, context, node, capture_macros=True ) - self.update_parsed_node(node, config) + self.update_parsed_node_config(node, config) except ValidationError as exc: # we got a ValidationError - probably bad types in config() msg = validator_error_message(exc) @@ -674,7 +649,14 @@ def parse(self) -> List[TestBlock]: if is_override: data['path'] = self.yaml.path.original_file_path patch = self._target_from_dict(SourcePatch, data) - self.manifest.add_source_patch(self.yaml.file, patch) + assert isinstance(self.yaml.file, SchemaSourceFile) + source_file = self.yaml.file + # source patches must be unique + key = (patch.overrides, patch.name) + if key in self.manifest.source_patches: + raise_duplicate_source_patch_name(patch, self.manifest.source_patches[key]) + self.manifest.source_patches[key] = patch + source_file.source_patches.append(key) else: source = self._target_from_dict(UnparsedSourceDefinition, data) self.add_source_definitions(source) @@ -771,6 +753,9 @@ def get_unparsed_target(self) -> Iterable[NonSourceTarget]: # target_type: UnparsedNodeUpdate, UnparsedAnalysisUpdate, # or UnparsedMacroUpdate self._target_type().validate(data) + if self.key != 'macros': + # macros don't have the 'config' key support yet + self.normalize_meta_attribute(data, path) node = self._target_type().from_dict(data) except (ValidationError, JSONValidationException) as exc: msg = error_context(path, self.key, data, exc) @@ -778,6 +763,33 @@ def get_unparsed_target(self) -> Iterable[NonSourceTarget]: else: yield node + # We want to raise an error if 'meta' is in two places, and move 'meta' + # from toplevel to config if necessary + def normalize_meta_attribute(self, data, path): + if 'meta' in data: + if 'config' in data and 'meta' in data['config']: + raise CompilationException(f""" + In {path}: found meta dictionary in 'config' dictionary and as top-level key. 
+ Remove the top-level key and define it under 'config' dictionary only. + """.strip()) + else: + if 'config' not in data: + data['config'] = {} + data['config']['meta'] = data.pop('meta') + + def patch_node_config(self, node, patch): + # Get the ContextConfig that's used in calculating the config + # This must match the model resource_type that's being patched + config = ContextConfig( + self.schema_parser.root_project, + node.fqn, + node.resource_type, + self.schema_parser.project.project_name, + ) + # We need to re-apply the config_call_dict after the patch config + config._config_call_dict = node.config_call_dict + self.schema_parser.update_parsed_node_config(node, config, patch_config_dict=patch.config) + class NodePatchParser( NonSourceParser[NodeTarget, ParsedNodePatch], @@ -786,6 +798,9 @@ class NodePatchParser( def parse_patch( self, block: TargetBlock[NodeTarget], refs: ParserRef ) -> None: + # We're not passing the ParsedNodePatch around anymore, so we + # could possibly skip creating one. Leaving here for now for + # code consistency. 
patch = ParsedNodePatch( name=block.target.name, original_file_path=block.target.original_file_path, @@ -795,8 +810,35 @@ def parse_patch( columns=refs.column_info, meta=block.target.meta, docs=block.target.docs, + config=block.target.config, ) - self.manifest.add_patch(self.yaml.file, patch) + assert isinstance(self.yaml.file, SchemaSourceFile) + source_file: SchemaSourceFile = self.yaml.file + if patch.yaml_key in ['models', 'seeds', 'snapshots']: + unique_id = self.manifest.ref_lookup.get_unique_id(patch.name, None) + elif patch.yaml_key == 'analyses': + unique_id = self.manifest.analysis_lookup.get_unique_id(patch.name, None) + else: + raise InternalException( + f'Unexpected yaml_key {patch.yaml_key} for patch in ' + f'file {source_file.path.original_file_path}' + ) + if unique_id is None: + # This will usually happen when a node is disabled + return + + # patches can't be overwritten + node = self.manifest.nodes.get(unique_id) + if node: + if node.patch_path: + package_name, existing_file_path = node.patch_path.split('://') + raise_duplicate_patch_name(patch, existing_file_path) + source_file.append_patch(patch.yaml_key, unique_id) + # If this patch has config changes, re-calculate the node config + # with the patch config + if patch.config: + self.patch_node_config(node, patch) + node.patch(patch) class TestablePatchParser(NodePatchParser[UnparsedNodeUpdate]): @@ -834,8 +876,24 @@ def parse_patch( description=block.target.description, meta=block.target.meta, docs=block.target.docs, + config=block.target.config, ) - self.manifest.add_macro_patch(self.yaml.file, patch) + assert isinstance(self.yaml.file, SchemaSourceFile) + source_file = self.yaml.file + # macros are fully namespaced + unique_id = f'macro.{patch.package_name}.{patch.name}' + macro = self.manifest.macros.get(unique_id) + if not macro: + warn_or_error( + f'WARNING: Found patch for macro "{patch.name}" ' + f'which was not found' + ) + return + if macro.patch_path: + package_name, 
existing_file_path = macro.patch_path.split('://') + raise_duplicate_macro_patch_name(patch, existing_file_path) + source_file.macro_patches[patch.name] = unique_id + macro.patch(patch) class ExposureParser(YamlReader): diff --git a/test/integration/029_docs_generate_tests/test_docs_generate.py b/test/integration/029_docs_generate_tests/test_docs_generate.py index eb2fb1cd51f..c1f37acba09 100644 --- a/test/integration/029_docs_generate_tests/test_docs_generate.py +++ b/test/integration/029_docs_generate_tests/test_docs_generate.py @@ -903,13 +903,13 @@ def rendered_model_config(self, **updates): 'materialized': 'view', 'pre-hook': [], 'post-hook': [], - 'vars': {}, 'column_types': {}, 'quoting': {}, 'tags': [], 'persist_docs': {}, 'full_refresh': None, 'on_schema_change': 'ignore', + 'meta': {}, } result.update(updates) return result @@ -924,7 +924,6 @@ def rendered_seed_config(self, **updates): 'persist_docs': {}, 'pre-hook': [], 'post-hook': [], - 'vars': {}, 'column_types': {}, 'quoting': {}, 'tags': [], @@ -934,6 +933,7 @@ def rendered_seed_config(self, **updates): 'database': None, 'schema': None, 'alias': None, + 'meta': {}, } result.update(updates) return result @@ -952,7 +952,6 @@ def rendered_snapshot_config(self, **updates): 'materialized': 'snapshot', 'pre-hook': [], 'post-hook': [], - 'vars': {}, 'column_types': {}, 'quoting': {}, 'tags': [], @@ -963,6 +962,7 @@ def rendered_snapshot_config(self, **updates): 'check_cols': 'all', 'unique_key': 'id', 'target_schema': None, + 'meta': {}, } result.update(updates) return result @@ -979,14 +979,8 @@ def unrendered_snapshot_config(self, **updates): def rendered_tst_config(self, **updates): result = { - 'column_types': {}, 'enabled': True, 'materialized': 'test', - 'persist_docs': {}, - 'post-hook': [], - 'pre-hook': [], - 'quoting': {}, - 'vars': {}, 'tags': [], 'severity': 'ERROR', 'store_failures': None, @@ -995,11 +989,10 @@ def rendered_tst_config(self, **updates): 'fail_calc': 'count(*)', 'where': None, 
'limit': None, - 'full_refresh': None, - 'on_schema_change': 'ignore', 'database': None, 'schema': 'dbt_test__audit', 'alias': None, + 'meta': {}, } result.update(updates) return result diff --git a/test/integration/039_config_test/data-alt/some_seed.csv b/test/integration/039_config_test/data-alt/some_seed.csv new file mode 100644 index 00000000000..83f9676727c --- /dev/null +++ b/test/integration/039_config_test/data-alt/some_seed.csv @@ -0,0 +1,2 @@ +id,value +4,2 diff --git a/test/integration/039_config_test/extra-alt/untagged.yml b/test/integration/039_config_test/extra-alt/untagged.yml new file mode 100644 index 00000000000..410c4057963 --- /dev/null +++ b/test/integration/039_config_test/extra-alt/untagged.yml @@ -0,0 +1,11 @@ +version: 2 + +models: + - name: untagged + description: "This is a model description" + meta: + owner: 'Somebody Else' + config: + meta: + owner: 'Julie Smith' + diff --git a/test/integration/039_config_test/extra-alt/untagged2.yml b/test/integration/039_config_test/extra-alt/untagged2.yml new file mode 100644 index 00000000000..0e8cc6de7d0 --- /dev/null +++ b/test/integration/039_config_test/extra-alt/untagged2.yml @@ -0,0 +1,11 @@ +version: 2 + +models: + - name: untagged + description: "This is a model description" + tests: + - not_null: + error_if: ">2" + config: + error_if: ">2" + diff --git a/test/integration/039_config_test/macros-alt/my_macro.sql b/test/integration/039_config_test/macros-alt/my_macro.sql new file mode 100644 index 00000000000..79c941d7518 --- /dev/null +++ b/test/integration/039_config_test/macros-alt/my_macro.sql @@ -0,0 +1,8 @@ +{% macro do_something2(foo2, bar2) %} + + select + '{{ foo2 }}' as foo2, + '{{ bar2 }}' as bar2 + +{% endmacro %} + diff --git a/test/integration/039_config_test/macros-alt/schema.yml b/test/integration/039_config_test/macros-alt/schema.yml new file mode 100644 index 00000000000..317c9a0546e --- /dev/null +++ b/test/integration/039_config_test/macros-alt/schema.yml @@ -0,0 +1,5 @@ 
+macros: + - name: my_macro + config: + meta: + owner: 'Joe Jones' diff --git a/test/integration/039_config_test/models-alt/schema.yml b/test/integration/039_config_test/models-alt/schema.yml new file mode 100644 index 00000000000..d0937a6235a --- /dev/null +++ b/test/integration/039_config_test/models-alt/schema.yml @@ -0,0 +1,31 @@ +version: 2 +sources: + - name: raw + database: "{{ target.database }}" + schema: "{{ target.schema }}" + tables: + - name: 'some_seed' + columns: + - name: id + +models: + - name: model + description: "This is a model description" + config: + tags: ['tag_in_schema'] + meta: + owner: 'Julie Smith' + my_attr: "{{ var('my_var') }}" + materialization: view + + columns: + - name: id + tests: + - not_null: + meta: + owner: 'Simple Simon' + - unique: + config: + meta: + owner: 'John Doe' + diff --git a/test/integration/039_config_test/models-alt/tagged/model.sql b/test/integration/039_config_test/models-alt/tagged/model.sql new file mode 100644 index 00000000000..6dbc83c3d6e --- /dev/null +++ b/test/integration/039_config_test/models-alt/tagged/model.sql @@ -0,0 +1,15 @@ +{{ + config( + materialized='view', + tags=['tag_1_in_model'], + ) +}} + +{{ + config( + materialized='table', + tags=['tag_2_in_model'], + ) +}} + +select 4 as id, 2 as value diff --git a/test/integration/039_config_test/models-alt/untagged.sql b/test/integration/039_config_test/models-alt/untagged.sql new file mode 100644 index 00000000000..a0dce3f086f --- /dev/null +++ b/test/integration/039_config_test/models-alt/untagged.sql @@ -0,0 +1,5 @@ +{{ + config(materialized='table') +}} + +select id, value from {{ source('raw', 'some_seed') }} diff --git a/test/integration/039_config_test/test_configs_in_schema_files.py b/test/integration/039_config_test/test_configs_in_schema_files.py new file mode 100644 index 00000000000..cff3ca9a3a9 --- /dev/null +++ b/test/integration/039_config_test/test_configs_in_schema_files.py @@ -0,0 +1,112 @@ +import os +import shutil + +from 
test.integration.base import DBTIntegrationTest, use_profile, get_manifest, normalize +from dbt.exceptions import CompilationException + + +class TestSchemaFileConfigs(DBTIntegrationTest): + @property + def schema(self): + return "config_039-alt" + + def unique_schema(self): + return super().unique_schema().upper() + + @property + def project_config(self): + return { + 'config-version': 2, + 'data-paths': ['data-alt'], + 'models': { + '+meta': { + 'company': 'NuMade', + }, + 'test': { + '+meta': { + 'project': 'test', + }, + 'tagged': { + '+meta': { + 'team': 'Core Team', + }, + 'tags': ['tag_in_project'], + 'model': { + 'materialized': 'table', + '+meta': { + 'owner': 'Julie Dent', + }, + } + } + }, + }, + 'vars': { + 'test': { + 'my_var': 'TESTING', + } + }, + 'seeds': { + 'quote_columns': False, + }, + } + + @property + def models(self): + return "models-alt" + + @use_profile('postgres') + def test_postgres_config_layering(self): + self.assertEqual(len(self.run_dbt(['seed'])), 1) + # test the project-level tag, and both config() call tags + self.assertEqual(len(self.run_dbt(['run', '--model', 'tag:tag_in_project'])), 1) + self.assertEqual(len(self.run_dbt(['run', '--model', 'tag:tag_1_in_model'])), 1) + self.assertEqual(len(self.run_dbt(['run', '--model', 'tag:tag_2_in_model'])), 1) + self.assertEqual(len(self.run_dbt(['run', '--model', 'tag:tag_in_schema'])), 1) + manifest = get_manifest() + model_id = 'model.test.model' + model_node = manifest.nodes[model_id] + model_tags = ['tag_1_in_model', 'tag_2_in_model', 'tag_in_project', 'tag_in_schema'] + model_node_tags = model_node.tags.copy() + model_node_tags.sort() + self.assertEqual(model_node_tags, model_tags) + model_node_config_tags = model_node.config.tags.copy() + model_node_config_tags.sort() + self.assertEqual(model_node_config_tags, model_tags) + model_meta = { + 'company': 'NuMade', + 'project': 'test', + 'team': 'Core Team', + 'owner': 'Julie Smith', + 'my_attr': "TESTING", + } + 
self.assertEqual(model_node.config.meta, model_meta) + # make sure we overwrote the materialization properly + models = self.get_models_in_schema() + self.assertEqual(models['model'], 'table') + self.assertTablesEqual('some_seed', 'model') + # look for test meta + schema_file_id = model_node.patch_path + schema_file = manifest.files[schema_file_id] + tests = schema_file.get_tests('models', 'model') + self.assertIn(tests[0], manifest.nodes) + test = manifest.nodes[tests[0]] + expected_meta = {'owner': 'Simple Simon'} + self.assertEqual(test.config.meta, expected_meta) + test = manifest.nodes[tests[1]] + expected_meta = {'owner': 'John Doe'} + self.assertEqual(test.config.meta, expected_meta) + + # copy a schema file with multiple metas + shutil.copyfile('extra-alt/untagged.yml', 'models-alt/untagged.yml') + with self.assertRaises(CompilationException): + results = self.run_dbt(["run"]) + + # copy a schema file with config key in top-level of test and in config dict + shutil.copyfile('extra-alt/untagged2.yml', 'models-alt/untagged.yml') + with self.assertRaises(CompilationException): + results = self.run_dbt(["run"]) + + def tearDown(self): + if os.path.exists(normalize('models-alt/untagged.yml')): + os.remove(normalize('models-alt/untagged.yml')) + diff --git a/test/integration/047_dbt_ls_test/test_ls.py b/test/integration/047_dbt_ls_test/test_ls.py index d7fb1a05593..34ebbcc1b19 100644 --- a/test/integration/047_dbt_ls_test/test_ls.py +++ b/test/integration/047_dbt_ls_test/test_ls.py @@ -81,7 +81,6 @@ def expect_snapshot_output(self): 'tags': [], 'pre-hook': [], 'quoting': {}, - 'vars': {}, 'column_types': {}, 'persist_docs': {}, 'target_database': self.default_database, @@ -95,6 +94,7 @@ def expect_snapshot_output(self): 'alias': None, 'check_cols': None, 'on_schema_change': 'ignore', + 'meta': {}, }, 'unique_id': 'snapshot.test.my_snapshot', 'original_file_path': normalize('snapshots/snapshot.sql'), @@ -121,7 +121,6 @@ def expect_analyses_output(self): 'tags': 
[], 'pre-hook': [], 'quoting': {}, - 'vars': {}, 'column_types': {}, 'persist_docs': {}, 'full_refresh': None, @@ -129,6 +128,7 @@ def expect_analyses_output(self): 'database': None, 'schema': None, 'alias': None, + 'meta': {}, }, 'unique_id': 'analysis.test.a', 'original_file_path': normalize('analyses/a.sql'), @@ -156,7 +156,6 @@ def expect_model_output(self): 'tags': [], 'pre-hook': [], 'quoting': {}, - 'vars': {}, 'column_types': {}, 'persist_docs': {}, 'full_refresh': None, @@ -164,6 +163,7 @@ def expect_model_output(self): 'database': None, 'schema': None, 'alias': None, + 'meta': {}, }, 'original_file_path': normalize('models/ephemeral.sql'), 'unique_id': 'model.test.ephemeral', @@ -182,7 +182,6 @@ def expect_model_output(self): 'tags': [], 'pre-hook': [], 'quoting': {}, - 'vars': {}, 'column_types': {}, 'persist_docs': {}, 'full_refresh': None, @@ -191,6 +190,7 @@ def expect_model_output(self): 'database': None, 'schema': None, 'alias': None, + 'meta': {}, }, 'original_file_path': normalize('models/incremental.sql'), 'unique_id': 'model.test.incremental', @@ -209,7 +209,6 @@ def expect_model_output(self): 'tags': [], 'pre-hook': [], 'quoting': {}, - 'vars': {}, 'column_types': {}, 'persist_docs': {}, 'full_refresh': None, @@ -217,6 +216,7 @@ def expect_model_output(self): 'database': None, 'schema': None, 'alias': None, + 'meta': {}, }, 'original_file_path': normalize('models/sub/inner.sql'), 'unique_id': 'model.test.inner', @@ -235,7 +235,6 @@ def expect_model_output(self): 'tags': [], 'pre-hook': [], 'quoting': {}, - 'vars': {}, 'column_types': {}, 'persist_docs': {}, 'full_refresh': None, @@ -243,6 +242,7 @@ def expect_model_output(self): 'database': None, 'schema': None, 'alias': None, + 'meta': {}, }, 'original_file_path': normalize('models/outer.sql'), 'unique_id': 'model.test.outer', @@ -272,7 +272,6 @@ def expect_model_ephemeral_output(self): 'tags': [], 'pre-hook': [], 'quoting': {}, - 'vars': {}, 'column_types': {}, 'persist_docs': {}, 
'full_refresh': None, @@ -280,6 +279,7 @@ def expect_model_ephemeral_output(self): 'database': None, 'schema': None, 'alias': None, + 'meta': {}, }, 'unique_id': 'model.test.ephemeral', 'original_file_path': normalize('models/ephemeral.sql'), @@ -329,7 +329,6 @@ def expect_seed_output(self): 'tags': [], 'pre-hook': [], 'quoting': {}, - 'vars': {}, 'column_types': {}, 'persist_docs': {}, 'quote_columns': False, @@ -338,6 +337,7 @@ def expect_seed_output(self): 'database': None, 'schema': None, 'alias': None, + 'meta': {}, }, 'unique_id': 'seed.test.seed', 'original_file_path': normalize('data/seed.csv'), @@ -361,7 +361,6 @@ def expect_test_output(self): 'config': { 'enabled': True, 'materialized': 'test', - 'post-hook': [], 'severity': 'ERROR', 'store_failures': None, 'warn_if': '!= 0', @@ -370,16 +369,10 @@ def expect_test_output(self): 'where': None, 'limit': None, 'tags': [], - 'pre-hook': [], - 'quoting': {}, - 'vars': {}, - 'column_types': {}, - 'persist_docs': {}, - 'full_refresh': None, - 'on_schema_change': 'ignore', 'database': None, 'schema': 'dbt_test__audit', 'alias': None, + 'meta': {}, }, 'unique_id': 'test.test.not_null_outer_id.e5db1d4aad', 'original_file_path': normalize('models/schema.yml'), @@ -394,7 +387,6 @@ def expect_test_output(self): 'config': { 'enabled': True, 'materialized': 'test', - 'post-hook': [], 'severity': 'ERROR', 'store_failures': None, 'warn_if': '!= 0', @@ -403,16 +395,10 @@ def expect_test_output(self): 'where': None, 'limit': None, 'tags': [], - 'pre-hook': [], - 'quoting': {}, - 'vars': {}, - 'column_types': {}, - 'persist_docs': {}, - 'full_refresh': None, - 'on_schema_change': 'ignore', 'database': None, 'schema': 'dbt_test__audit', 'alias': None, + 'meta': {}, }, 'unique_id': 'test.test.t', 'original_file_path': normalize('tests/t.sql'), @@ -427,7 +413,6 @@ def expect_test_output(self): 'config': { 'enabled': True, 'materialized': 'test', - 'post-hook': [], 'severity': 'ERROR', 'store_failures': None, 'warn_if': '!= 0', 
@@ -436,16 +421,10 @@ def expect_test_output(self): 'where': None, 'limit': None, 'tags': [], - 'pre-hook': [], - 'quoting': {}, - 'vars': {}, - 'column_types': {}, - 'persist_docs': {}, - 'full_refresh': None, - 'on_schema_change': 'ignore', 'database': None, 'schema': 'dbt_test__audit', 'alias': None, + 'meta': {}, }, 'unique_id': 'test.test.unique_outer_id.615b011076', 'original_file_path': normalize('models/schema.yml'), diff --git a/test/integration/base.py b/test/integration/base.py index 463948de4b8..27e5333b43f 100644 --- a/test/integration/base.py +++ b/test/integration/base.py @@ -24,6 +24,7 @@ from dbt.config import RuntimeConfig from dbt.context import providers from dbt.logger import GLOBAL_LOGGER as logger, log_manager +from dbt.contracts.graph.manifest import Manifest INITIAL_ROOT = os.getcwd() @@ -1224,9 +1225,22 @@ def __eq__(self, other): def __repr__(self): return 'AnyStringWith<{!r}>'.format(self.contains) + def bigquery_rate_limiter(err, *args): msg = str(err) if 'too many table update operations for this table' in msg: time.sleep(1) return True return False + + +def get_manifest(): + path = './target/partial_parse.msgpack' + if os.path.exists(path): + with open(path, 'rb') as fp: + manifest_mp = fp.read() + manifest: Manifest = Manifest.from_msgpack(manifest_mp) + return manifest + else: + return None + diff --git a/test/unit/test_contracts_graph_compiled.py b/test/unit/test_contracts_graph_compiled.py index 041fae3f3cf..7974d37226e 100644 --- a/test/unit/test_contracts_graph_compiled.py +++ b/test/unit/test_contracts_graph_compiled.py @@ -136,8 +136,8 @@ def basic_uncompiled_dict(): 'pre-hook': [], 'quoting': {}, 'tags': [], - 'vars': {}, - 'on_schema_change': 'ignore' + 'on_schema_change': 'ignore', + 'meta': {}, }, 'docs': {'show': True}, 'columns': {}, @@ -181,8 +181,8 @@ def basic_compiled_dict(): 'pre-hook': [], 'quoting': {}, 'tags': [], - 'vars': {}, - 'on_schema_change': 'ignore' + 'on_schema_change': 'ignore', + 'meta': {}, }, 
'docs': {'show': True}, 'columns': {}, @@ -203,7 +203,6 @@ def test_basic_uncompiled_model(minimal_uncompiled_dict, basic_uncompiled_dict, assert node.empty is False assert node.is_refable is True assert node.is_ephemeral is False - assert node.local_vars() == {} assert_from_dict(node, minimal_uncompiled_dict, CompiledModelNode) pickle.loads(pickle.dumps(node)) @@ -216,7 +215,6 @@ def test_basic_compiled_model(basic_compiled_dict, basic_compiled_model): assert node.empty is False assert node.is_refable is True assert node.is_ephemeral is False - assert node.local_vars() == {} def test_invalid_extra_fields_model(minimal_uncompiled_dict): @@ -425,21 +423,15 @@ def basic_uncompiled_schema_test_dict(): 'alias': 'bar', 'tags': [], 'config': { - 'column_types': {}, 'enabled': True, 'materialized': 'test', - 'persist_docs': {}, - 'post-hook': [], - 'pre-hook': [], - 'quoting': {}, 'tags': [], - 'vars': {}, 'severity': 'ERROR', 'schema': 'dbt_test__audit', 'warn_if': '!= 0', 'error_if': '!= 0', 'fail_calc': 'count(*)', - 'on_schema_change': 'ignore', + 'meta': {}, }, 'deferred': False, 'docs': {'show': True}, @@ -480,21 +472,15 @@ def basic_compiled_schema_test_dict(): 'alias': 'bar', 'tags': [], 'config': { - 'column_types': {}, 'enabled': True, 'materialized': 'test', - 'persist_docs': {}, - 'post-hook': [], - 'pre-hook': [], - 'quoting': {}, 'tags': [], - 'vars': {}, 'severity': 'warn', 'schema': 'dbt_test__audit', 'warn_if': '!= 0', 'error_if': '!= 0', 'fail_calc': 'count(*)', - 'on_schema_change': 'ignore', + 'meta': {}, }, 'docs': {'show': True}, 'columns': {}, @@ -524,7 +510,6 @@ def test_basic_uncompiled_schema_test(basic_uncompiled_schema_test_node, basic_u assert node.empty is False assert node.is_refable is False assert node.is_ephemeral is False - assert node.local_vars() == {} assert_from_dict(node, minimum, CompiledSchemaTestNode) @@ -537,7 +522,6 @@ def test_basic_compiled_schema_test(basic_compiled_schema_test_node, basic_compi assert node.empty is False 
assert node.is_refable is False assert node.is_ephemeral is False - assert node.local_vars() == {} def test_invalid_extra_schema_test_fields(minimal_schema_test_dict): diff --git a/test/unit/test_contracts_graph_parsed.py b/test/unit/test_contracts_graph_parsed.py index 38a539ae4eb..3b6d01fa89f 100644 --- a/test/unit/test_contracts_graph_parsed.py +++ b/test/unit/test_contracts_graph_parsed.py @@ -74,9 +74,9 @@ def populated_node_config_dict(): 'pre-hook': [], 'quoting': {}, 'tags': [], - 'vars': {}, 'extra': 'even more', 'on_schema_change': 'ignore', + 'meta': {}, } @@ -154,8 +154,8 @@ def base_parsed_model_dict(): 'pre-hook': [], 'quoting': {}, 'tags': [], - 'vars': {}, - 'on_schema_change': 'ignore' + 'on_schema_change': 'ignore', + 'meta': {}, }, 'deferred': False, 'docs': {'show': True}, @@ -245,8 +245,8 @@ def complex_parsed_model_dict(): 'pre-hook': [], 'quoting': {}, 'tags': [], - 'vars': {'foo': 100}, - 'on_schema_change': 'ignore' + 'on_schema_change': 'ignore', + 'meta': {}, }, 'docs': {'show': True}, 'columns': { @@ -262,7 +262,6 @@ def complex_parsed_model_dict(): 'column_types': {'a': 'text'}, 'materialized': 'ephemeral', 'post_hook': ['insert into blah(a, b) select "1", 1'], - 'vars': {'foo': 100}, }, } @@ -293,7 +292,6 @@ def complex_parsed_model_object(): column_types={'a': 'text'}, materialized='ephemeral', post_hook=[Hook(sql='insert into blah(a, b) select "1", 1')], - vars={'foo': 100}, ), columns={'a': ColumnInfo('a', 'a text field', {})}, checksum=FileHash.from_contents(''), @@ -301,7 +299,6 @@ def complex_parsed_model_object(): 'column_types': {'a': 'text'}, 'materialized': 'ephemeral', 'post_hook': ['insert into blah(a, b) select "1", 1'], - 'vars': {'foo': 100}, }, ) @@ -313,7 +310,6 @@ def test_model_basic(basic_parsed_model_object, base_parsed_model_dict, minimal_ assert node.empty is False assert node.is_refable is True assert node.is_ephemeral is False - assert node.local_vars() == {} minimum = minimal_parsed_model_dict 
assert_from_dict(node, minimum) @@ -327,7 +323,6 @@ def test_model_complex(complex_parsed_model_object, complex_parsed_model_dict): assert node.empty is False assert node.is_refable is True assert node.is_ephemeral is True - assert node.local_vars() == {'foo': 100} def test_invalid_bad_tags(base_parsed_model_dict): @@ -441,8 +436,8 @@ def basic_parsed_seed_dict(): 'pre-hook': [], 'quoting': {}, 'tags': [], - 'vars': {}, - 'on_schema_change': 'ignore' + 'on_schema_change': 'ignore', + 'meta': {}, }, 'deferred': False, 'docs': {'show': True}, @@ -534,9 +529,9 @@ def complex_parsed_seed_dict(): 'pre-hook': [], 'quoting': {}, 'tags': [], - 'vars': {}, 'quote_columns': True, - 'on_schema_change': 'ignore' + 'on_schema_change': 'ignore', + 'meta': {}, }, 'deferred': False, 'docs': {'show': True}, @@ -678,6 +673,7 @@ def basic_parsed_model_patch_dict(): 'tags': [], }, }, + 'config': {}, } @@ -692,6 +688,7 @@ def basic_parsed_model_patch_object(): columns={'a': ColumnInfo(name='a', description='a text field', meta={})}, docs=Docs(), meta={}, + config={}, ) @@ -788,8 +785,8 @@ def base_parsed_hook_dict(): 'pre-hook': [], 'quoting': {}, 'tags': [], - 'vars': {}, - 'on_schema_change': 'ignore' + 'on_schema_change': 'ignore', + 'meta': {}, }, 'docs': {'show': True}, 'columns': {}, @@ -859,8 +856,8 @@ def complex_parsed_hook_dict(): 'pre-hook': [], 'quoting': {}, 'tags': [], - 'vars': {}, - 'on_schema_change': 'ignore' + 'on_schema_change': 'ignore', + 'meta': {}, }, 'docs': {'show': True}, 'columns': { @@ -994,21 +991,15 @@ def basic_parsed_schema_test_dict(): 'tags': [], 'meta': {}, 'config': { - 'column_types': {}, 'enabled': True, 'materialized': 'test', - 'persist_docs': {}, - 'post-hook': [], - 'pre-hook': [], - 'quoting': {}, 'tags': [], - 'vars': {}, 'severity': 'ERROR', - 'schema': 'dbt_test__audit', 'warn_if': '!= 0', 'error_if': '!= 0', 'fail_calc': 'count(*)', - 'on_schema_change': 'ignore', + 'meta': {}, + 'schema': 'dbt_test__audit', }, 'docs': {'show': True}, 
'columns': {}, @@ -1072,22 +1063,16 @@ def complex_parsed_schema_test_dict(): 'tags': ['tag'], 'meta': {}, 'config': { - 'column_types': {'a': 'text'}, 'enabled': True, 'materialized': 'table', - 'persist_docs': {}, - 'post-hook': [], - 'pre-hook': [], - 'quoting': {}, 'tags': [], - 'vars': {}, 'severity': 'WARN', - 'schema': 'dbt_test__audit', 'warn_if': '!= 0', 'error_if': '!= 0', 'fail_calc': 'count(*)', 'extra_key': 'extra value', - 'on_schema_change': 'ignore', + 'meta': {}, + 'schema': 'dbt_test__audit', }, 'docs': {'show': False}, 'columns': { @@ -1105,7 +1090,6 @@ def complex_parsed_schema_test_dict(): }, 'checksum': {'name': 'sha256', 'checksum': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'}, 'unrendered_config': { - 'column_types': {'a': 'text'}, 'materialized': 'table', 'severity': 'WARN' }, @@ -1115,7 +1099,6 @@ def complex_parsed_schema_test_dict(): @pytest.fixture def complex_parsed_schema_test_object(): cfg = TestConfig( - column_types={'a': 'text'}, materialized='table', severity='WARN' ) @@ -1146,7 +1129,6 @@ def complex_parsed_schema_test_object(): test_metadata=TestMetadata(namespace=None, name='foo', kwargs={}), checksum=FileHash.from_contents(''), unrendered_config={ - 'column_types': {'a': 'text'}, 'materialized': 'table', 'severity': 'WARN' }, @@ -1201,13 +1183,13 @@ def basic_timestamp_snapshot_config_dict(): 'pre-hook': [], 'quoting': {}, 'tags': [], - 'vars': {}, 'unique_key': 'id', 'strategy': 'timestamp', 'updated_at': 'last_update', 'target_database': 'some_snapshot_db', 'target_schema': 'some_snapshot_schema', 'on_schema_change': 'ignore', + 'meta': {}, } @@ -1233,7 +1215,6 @@ def complex_timestamp_snapshot_config_dict(): 'pre-hook': [], 'quoting': {}, 'tags': [], - 'vars': {}, 'target_database': 'some_snapshot_db', 'target_schema': 'some_snapshot_schema', 'unique_key': 'id', @@ -1241,6 +1222,7 @@ def complex_timestamp_snapshot_config_dict(): 'strategy': 'timestamp', 'updated_at': 'last_update', 
'on_schema_change': 'ignore', + 'meta': {}, } @@ -1291,13 +1273,13 @@ def basic_check_snapshot_config_dict(): 'pre-hook': [], 'quoting': {}, 'tags': [], - 'vars': {}, 'target_database': 'some_snapshot_db', 'target_schema': 'some_snapshot_schema', 'unique_key': 'id', 'strategy': 'check', 'check_cols': 'all', 'on_schema_change': 'ignore', + 'meta': {}, } @@ -1323,7 +1305,6 @@ def complex_set_snapshot_config_dict(): 'pre-hook': [], 'quoting': {}, 'tags': [], - 'vars': {}, 'target_database': 'some_snapshot_db', 'target_schema': 'some_snapshot_schema', 'unique_key': 'id', @@ -1331,6 +1312,7 @@ def complex_set_snapshot_config_dict(): 'strategy': 'check', 'check_cols': ['a', 'b'], 'on_schema_change': 'ignore', + 'meta': {}, } @@ -1430,13 +1412,13 @@ def basic_timestamp_snapshot_dict(): 'pre-hook': [], 'quoting': {}, 'tags': [], - 'vars': {}, 'target_database': 'some_snapshot_db', 'target_schema': 'some_snapshot_schema', 'unique_key': 'id', 'strategy': 'timestamp', 'updated_at': 'last_update', - 'on_schema_change': 'ignore' + 'on_schema_change': 'ignore', + 'meta': {}, }, 'docs': {'show': True}, 'columns': {}, @@ -1562,13 +1544,13 @@ def basic_check_snapshot_dict(): 'pre-hook': [], 'quoting': {}, 'tags': [], - 'vars': {}, 'target_database': 'some_snapshot_db', 'target_schema': 'some_snapshot_schema', 'unique_key': 'id', 'strategy': 'check', 'check_cols': 'all', - 'on_schema_change': 'ignore' + 'on_schema_change': 'ignore', + 'meta': {}, }, 'docs': {'show': True}, 'columns': {}, @@ -1717,6 +1699,7 @@ def populated_parsed_node_patch_dict(): 'meta': {'key': ['value']}, 'yaml_key': 'models', 'package_name': 'test', + 'config': {}, } @@ -1731,6 +1714,7 @@ def populated_parsed_node_patch_object(): yaml_key='models', package_name='test', docs=Docs(show=False), + config={}, ) @@ -1778,7 +1762,6 @@ def test_ok(self): arguments=[], ) assert_symmetric(macro, macro_dict) - self.assertEqual(macro.local_vars(), {}) pickle.loads(pickle.dumps(macro)) def 
test_invalid_missing_unique_id(self): diff --git a/test/unit/test_contracts_graph_unparsed.py b/test/unit/test_contracts_graph_unparsed.py index a15ba1e16c6..26b2bc3b947 100644 --- a/test/unit/test_contracts_graph_unparsed.py +++ b/test/unit/test_contracts_graph_unparsed.py @@ -258,6 +258,7 @@ def test_defaults(self): 'loader': '', 'meta': {}, 'tags': [], + 'config': {}, } self.assert_from_dict(minimum, from_dict) self.assert_to_dict(minimum, to_dict) @@ -281,6 +282,7 @@ def test_contents(self): 'tables': [], 'meta': {}, 'tags': [], + 'config': {}, } self.assert_symmetric(empty, dct) @@ -338,6 +340,7 @@ def test_table_defaults(self): }, ], 'tags': [], + 'config': {}, } self.assert_from_dict(source, from_dict) self.assert_symmetric(source, to_dict) @@ -406,6 +409,7 @@ def test_defaults(self): 'docs': {'show': True}, 'tests': [], 'meta': {}, + 'config': {}, } self.assert_from_dict(minimum, from_dict) self.assert_to_dict(minimum, to_dict) @@ -468,6 +472,7 @@ def test_contents(self): }, ], 'docs': {'show': False}, + 'config': {}, } self.assert_symmetric(update, dct) pickle.loads(pickle.dumps(update))