fix(chart): allow null for optional query object props #12905

Merged (1 commit, Feb 3, 2021)
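For context on the fix itself: in marshmallow, `required=False` only lets a key be omitted; an explicit `null` value is still rejected with "Field may not be null." unless the field also sets `allow_none=True`. A minimal standalone sketch of that behavior (`ExampleSchema` is hypothetical, not part of this PR):

from marshmallow import Schema, ValidationError, fields

class ExampleSchema(Schema):
    # Key may be omitted, but an explicit null is rejected:
    optional_only = fields.String(required=False)
    # Key may be omitted and an explicit null is accepted:
    nullable = fields.String(allow_none=True)

ExampleSchema().load({})                  # OK: both keys absent
ExampleSchema().load({"nullable": None})  # OK: explicit null accepted
try:
    ExampleSchema().load({"optional_only": None})
except ValidationError as err:
    print(err.messages)  # {'optional_only': ['Field may not be null.']}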
superset/charts/schemas.py (23 changes: 18 additions & 5 deletions)
@@ -873,18 +873,20 @@ class Meta: # pylint: disable=too-few-public-methods
     )
     applied_time_extras = fields.Dict(
         description="A mapping of temporal extras that have been applied to the query",
-        required=False,
+        allow_none=True,
         example={"__time_range": "1 year ago : now"},
     )
-    filters = fields.List(fields.Nested(ChartDataFilterSchema), required=False)
+    filters = fields.List(fields.Nested(ChartDataFilterSchema), allow_none=True)
     granularity = fields.String(
         description="Name of temporal column used for time filtering. For legacy Druid "
         "datasources this defines the time grain.",
+        allow_none=True,
     )
     granularity_sqla = fields.String(
         description="Name of temporal column used for time filtering for SQL "
         "datasources. This field is deprecated, use `granularity` "
         "instead.",
+        allow_none=True,
         deprecated=True,
     )
     groupby = fields.List(
@@ -897,9 +899,11 @@ class Meta: # pylint: disable=too-few-public-methods
         "references to datasource metrics (strings), or ad-hoc metrics"
         "which are defined only within the query object. See "
         "`ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics.",
+        allow_none=True,
     )
     post_processing = fields.List(
         fields.Nested(ChartDataPostProcessingOperationSchema, allow_none=True),
+        allow_none=True,
         description="Post processing operations to be applied to the result set. "
         "Operations are applied to the result set in sequential order.",
     )
@@ -923,40 +927,45 @@ class Meta: # pylint: disable=too-few-public-methods
         "- Last X seconds/minutes/hours/days/weeks/months/years\n"
         "- Next X seconds/minutes/hours/days/weeks/months/years\n",
         example="Last week",
+        allow_none=True,
     )
     time_shift = fields.String(
         description="A human-readable date/time string. "
         "Please refer to [parsdatetime](https://github.com/bear/parsedatetime) "
         "documentation for details on valid values.",
+        allow_none=True,
     )
     is_timeseries = fields.Boolean(
-        description="Is the `query_object` a timeseries.", required=False
+        description="Is the `query_object` a timeseries.", allow_none=True,
     )
     timeseries_limit = fields.Integer(
         description="Maximum row count for timeseries queries. Default: `0`",
+        allow_none=True,
     )
     timeseries_limit_metric = fields.Raw(
         description="Metric used to limit timeseries queries by.", allow_none=True,
     )
     row_limit = fields.Integer(
         description='Maximum row count. Default: `config["ROW_LIMIT"]`',
+        allow_none=True,
         validate=[
             Range(min=1, error=_("`row_limit` must be greater than or equal to 1"))
         ],
     )
     row_offset = fields.Integer(
         description="Number of rows to skip. Default: `0`",
+        allow_none=True,
         validate=[
             Range(min=0, error=_("`row_offset` must be greater than or equal to 0"))
         ],
     )
     order_desc = fields.Boolean(
-        description="Reverse order. Default: `false`", required=False
+        description="Reverse order. Default: `false`", allow_none=True,
     )
     extras = fields.Nested(
         ChartDataExtrasSchema,
         description="Extra parameters to add to the query.",
-        required=False,
+        allow_none=True,
     )
     columns = fields.List(
         fields.String(),
@@ -967,24 +976,28 @@ class Meta: # pylint: disable=too-few-public-methods
         fields.List(fields.Raw()),
         description="Expects a list of lists where the first element is the column "
         "name which to sort by, and the second element is a boolean.",
+        allow_none=True,
         example=[["my_col_1", False], ["my_col_2", True]],
     )
     where = fields.String(
         description="WHERE clause to be added to queries using AND operator."
         "This field is deprecated and should be passed to `extras`.",
+        allow_none=True,
         deprecated=True,
     )
     having = fields.String(
         description="HAVING clause to be added to aggregate queries using "
         "AND operator. This field is deprecated and should be passed "
         "to `extras`.",
+        allow_none=True,
         deprecated=True,
     )
     having_filters = fields.List(
         fields.Nested(ChartDataFilterSchema),
         description="HAVING filters to be added to legacy Druid datasource queries. "
         "This field is deprecated and should be passed to `extras` "
         "as `having_druid`.",
+        allow_none=True,
         deprecated=True,
     )
     druid_time_origin = fields.String(
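With the schema changes above, each of these query-object props accepts an explicit null as well as being absent. A hypothetical request payload sketch follows (datasource id and field values invented for illustration); note that marshmallow skips a field's validators when it receives None on an `allow_none=True` field, so the Range checks on `row_limit`/`row_offset` do not run for explicit nulls:

# Hypothetical chart-data payload: explicit nulls now pass schema validation
# instead of failing with "Field may not be null."
payload = {
    "datasource": {"id": 1, "type": "table"},
    "queries": [
        {
            "metrics": ["count"],
            "granularity": None,  # optional temporal column
            "filters": None,
            "row_limit": None,    # Range validator is skipped for None
            "order_desc": None,
        }
    ],
}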
tests/query_context_tests.py (18 changes: 18 additions & 0 deletions)
@@ -18,6 +18,8 @@
 
 from superset import db
 from superset.charts.schemas import ChartDataQueryContextSchema
+from superset.common.query_context import QueryContext
+from superset.common.query_object import QueryObject
 from superset.connectors.connector_registry import ConnectorRegistry
 from superset.extensions import cache_manager
 from superset.models.cache import CacheKey
@@ -126,6 +128,22 @@ def test_query_cache_key_changes_when_datasource_is_updated(self):
         # the new cache_key should be different due to updated datasource
         self.assertNotEqual(cache_key_original, cache_key_new)
 
+    def test_query_cache_key_does_not_change_for_non_existent_or_null(self):
+        self.login(username="admin")
+        payload = get_query_context("birth_names", add_postprocessing_operations=True)
+        del payload["queries"][0]["granularity"]
+
+        # construct baseline query_cache_key from query_context with post processing operation
+        query_context: QueryContext = ChartDataQueryContextSchema().load(payload)
+        query_object: QueryObject = query_context.queries[0]
+        cache_key_original = query_context.query_cache_key(query_object)
+
+        payload["queries"][0]["granularity"] = None
+        query_context = ChartDataQueryContextSchema().load(payload)
+        query_object = query_context.queries[0]
+
+        assert query_context.query_cache_key(query_object) == cache_key_original
+
     def test_query_cache_key_changes_when_post_processing_is_updated(self):
         self.login(username="admin")
         payload = get_query_context("birth_names", add_postprocessing_operations=True)
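The new test pins down that a missing `granularity` and an explicit `"granularity": None` yield the same query cache key. One plausible mechanism, sketched here as an assumption (this is not the actual Superset implementation), is pruning None-valued props before hashing:

import hashlib
import json

# Assumption for illustration: a cache key built from the query dict with
# None-valued entries dropped makes "key absent" and "key: null" hash the same.
def sketch_cache_key(query: dict) -> str:
    pruned = {k: v for k, v in query.items() if v is not None}
    return hashlib.md5(json.dumps(pruned, sort_keys=True).encode()).hexdigest()

assert sketch_cache_key({"metrics": ["count"]}) == sketch_cache_key(
    {"metrics": ["count"], "granularity": None}
)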