diff --git a/sdk/monitor/azure-monitor-query/CHANGELOG.md b/sdk/monitor/azure-monitor-query/CHANGELOG.md
index ea4ae667b247..5805d4278e0b 100644
--- a/sdk/monitor/azure-monitor-query/CHANGELOG.md
+++ b/sdk/monitor/azure-monitor-query/CHANGELOG.md
@@ -8,6 +8,7 @@
- Added a `MetricClass` enum to provide the class of a metric.
- Added a `metric_class` attribute to the `MetricDefinition` type.
- Added a `MetricNamespaceClassification` enum to support the `namespace_classification` attribute on `MetricNamespace` type.
+- Added a `MetricUnit` enum to describe the unit of the metric.
### Breaking Changes
@@ -30,6 +31,10 @@
- Removed `LogsBatchResultError` type.
- `LogsQueryResultTable` is renamed to `LogsTable`
- `LogsQueryResultColumn` is renamed to `LogsTableColumn`
+- Removed the `LogsTableColumn` type. Column labels are now plain strings.
+- `start_time` in the `list_metric_namespaces` API now accepts a `datetime` object.
+- The order of the parameters in `LogsBatchQuery` has changed. In addition, `headers` is no longer accepted.
+- `timespan` is now a required keyword-only argument in the logs APIs, as shown in the example below.
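+
+A minimal sketch of the new calling pattern, assuming a placeholder workspace ID (`timespan` must be passed by keyword, and column labels are plain strings):
+
+```python
+from datetime import timedelta
+from azure.identity import DefaultAzureCredential
+from azure.monitor.query import LogsQueryClient
+
+client = LogsQueryClient(DefaultAzureCredential())
+
+# Omitting `timespan` now raises a TypeError.
+response = client.query("<workspace-id>", "AppRequests | take 5", timespan=timedelta(days=1))
+
+for table in response.tables:
+    print(table.columns)  # a list of column label strings
+```
+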
### Bugs Fixed
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/__init__.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/__init__.py
index 386aec140be1..d201fbff31bb 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/__init__.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/__init__.py
@@ -12,12 +12,12 @@
LogsBatchQueryResult,
LogsQueryResult,
LogsTable,
- LogsTableColumn,
MetricsResult,
LogsBatchQuery,
MetricNamespace,
MetricNamespaceClassification,
MetricDefinition,
+ MetricUnit,
TimeSeriesElement,
Metric,
MetricValue,
@@ -32,13 +32,13 @@
"LogsQueryClient",
"LogsBatchQueryResult",
"LogsQueryResult",
- "LogsTableColumn",
"LogsTable",
"LogsBatchQuery",
"MetricsQueryClient",
"MetricNamespace",
"MetricNamespaceClassification",
"MetricDefinition",
+ "MetricUnit",
"MetricsResult",
"TimeSeriesElement",
"Metric",
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/_helpers.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/_helpers.py
index 205239d1816f..cda750cd845e 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/_helpers.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/_helpers.py
@@ -90,4 +90,4 @@ def native_col_type(col_type, value):
return value
def process_row(col_types, row):
- return [native_col_type(col_types[ind].type, val) for ind, val in enumerate(row)]
+ return [native_col_type(col_types[ind], val) for ind, val in enumerate(row)]
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py
index 7431510fb04d..eaac63c874bc 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py
@@ -5,7 +5,7 @@
# license information.
# --------------------------------------------------------------------------
-from typing import TYPE_CHECKING, Any, Union, Sequence, Dict, Optional
+from typing import TYPE_CHECKING, Any, Union, Sequence, Dict, List
from azure.core.exceptions import HttpResponseError
from azure.core.tracing.decorator import distributed_trace
@@ -17,7 +17,7 @@
if TYPE_CHECKING:
from azure.core.credentials import TokenCredential
- from datetime import timedelta
+ from datetime import timedelta, datetime
class LogsQueryClient(object):
@@ -51,8 +51,8 @@ def __init__(self, credential, **kwargs):
self._query_op = self._client.query
@distributed_trace
- def query(self, workspace_id, query, timespan=None, **kwargs):
- # type: (str, str, Optional[timedelta], Any) -> LogsQueryResult
+ def query(self, workspace_id, query, **kwargs):
+ # type: (str, str, Any) -> LogsQueryResult
"""Execute an Analytics query.
Executes an Analytics query for data.
@@ -63,9 +63,9 @@ def query(self, workspace_id, query, timespan=None, **kwargs):
:param query: The Analytics query. Learn more about the `Analytics query syntax
`_.
:type query: str
- :param timespan: The timespan for which to query the data. This can be a timedelta,
+ :keyword timespan: The timespan for which to query the data. This can be a timedelta,
a timedelta and a start datetime, or a start datetime/end datetime.
- :type timespan: ~datetime.timedelta or tuple[~datetime.datetime, ~datetime.timedelta]
+ :paramtype timespan: ~datetime.timedelta or tuple[~datetime.datetime, ~datetime.timedelta]
or tuple[~datetime.datetime, ~datetime.datetime]
:keyword int server_timeout: the server timeout in seconds. The default timeout is 3 minutes,
and the maximum timeout is 10 minutes.
@@ -76,7 +76,7 @@ def query(self, workspace_id, query, timespan=None, **kwargs):
:keyword additional_workspaces: A list of workspaces that are included in the query.
These can be qualified workspace names, workspace Ids, or Azure resource Ids.
:paramtype additional_workspaces: list[str]
- :return: QueryResults, or the result of cls(response)
+ :return: LogsQueryResult, or the result of cls(response)
:rtype: ~azure.monitor.query.LogsQueryResult
:raises: ~azure.core.exceptions.HttpResponseError
@@ -89,7 +89,9 @@ def query(self, workspace_id, query, timespan=None, **kwargs):
:dedent: 0
:caption: Get a response for a single Log Query
"""
- timespan = construct_iso8601(timespan)
+ if 'timespan' not in kwargs:
+ raise TypeError("query() missing 1 required keyword-only argument: 'timespan'")
+ timespan = construct_iso8601(kwargs.pop('timespan'))
include_statistics = kwargs.pop("include_statistics", False)
include_visualization = kwargs.pop("include_visualization", False)
server_timeout = kwargs.pop("server_timeout", None)
@@ -126,7 +128,7 @@ def query(self, workspace_id, query, timespan=None, **kwargs):
@distributed_trace
def query_batch(self, queries, **kwargs):
- # type: (Union[Sequence[Dict], Sequence[LogsBatchQuery]], Any) -> Sequence[LogsBatchQueryResult]
+ # type: (Union[Sequence[Dict], Sequence[LogsBatchQuery]], Any) -> List[LogsBatchQueryResult]
"""Execute a list of analytics queries. Each request can be either a LogQueryRequest
object or an equivalent serialized model.
@@ -135,7 +137,7 @@ def query_batch(self, queries, **kwargs):
:param queries: The list of queries that should be processed
:type queries: list[dict] or list[~azure.monitor.query.LogsBatchQuery]
:return: List of LogsBatchQueryResult, or the result of cls(response)
- :rtype: ~list[~azure.monitor.query.LogsBatchQueryResult]
+ :rtype: list[~azure.monitor.query.LogsBatchQueryResult]
:raises: ~azure.core.exceptions.HttpResponseError
.. admonition:: Example:
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/_metrics_query_client.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/_metrics_query_client.py
index aae99348a479..30a99f6f82a0 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/_metrics_query_client.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/_metrics_query_client.py
@@ -8,6 +8,7 @@
# pylint: disable=anomalous-backslash-in-string
from typing import TYPE_CHECKING, Any, Optional
+from msrest.serialization import Serializer
from azure.core.tracing.decorator import distributed_trace
from ._generated._monitor_query_client import (
@@ -59,9 +60,6 @@ def query(self, resource_uri, metric_names, **kwargs):
# type: (str, list, Optional[timedelta], Any) -> MetricsResult
"""Lists the metric values for a resource.
- **Note**: Although the start_time, end_time, duration are optional parameters, it is highly
- recommended to specify the timespan. If not, the entire dataset is queried.
-
:param resource_uri: The identifier of the resource.
:type resource_uri: str
:param metric_names: The names of the metrics to retrieve.
@@ -93,9 +91,6 @@ def query(self, resource_uri, metric_names, **kwargs):
‘c1’**\ :code:`
`- Return all time series where A = a1:code:`
`\ **$filter=A eq ‘a1’ and
B eq ‘\ *’ and C eq ‘*\ ’**.
:paramtype filter: str
- :keyword result_type: Reduces the set of data collected. The syntax allowed depends on the
- operation. See the operation's description for details.
- :paramtype result_type: str or ~monitor_query_client.models.ResultType
:keyword metric_namespace: Metric namespace to query metric definitions for.
:paramtype metric_namespace: str
:return: Response, or the result of cls(response)
@@ -131,15 +126,19 @@ def list_metric_namespaces(self, resource_uri, **kwargs):
:param resource_uri: The identifier of the resource.
:type resource_uri: str
- :keyword start_time: The ISO 8601 conform Date start time from which to query for metric
- namespaces.
- :paramtype start_time: str
+ :keyword start_time: The start time from which to query for metric
+ namespaces. This should be provided as a datetime object.
+ :paramtype start_time: ~datetime.datetime
:return: An iterator like instance of either MetricNamespace or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.monitor.query.MetricNamespace]
:raises: ~azure.core.exceptions.HttpResponseError
"""
+ start_time = kwargs.pop('start_time', None)
+ if start_time:
+ start_time = Serializer.serialize_iso(start_time)
return self._namespace_op.list(
resource_uri,
+ start_time,
cls=kwargs.pop(
"cls",
lambda objs: [
@@ -155,12 +154,13 @@ def list_metric_definitions(self, resource_uri, metric_namespace=None, **kwargs)
:param resource_uri: The identifier of the resource.
:type resource_uri: str
- :param metric_namespace: Metric namespace to query metric definitions for.
- :type metric_namespace: str
+ :keyword namespace: Metric namespace to query metric definitions for.
+ :paramtype namespace: str
:return: An iterator like instance of either MetricDefinitionCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.monitor.query.MetricDefinition]
:raises: ~azure.core.exceptions.HttpResponseError
"""
+ metric_namespace = kwargs.pop('namespace', None)
return self._definitions_op.list(
resource_uri,
metric_namespace,
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/_models.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/_models.py
index 2b3a33672ec6..18cfa1245435 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/_models.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/_models.py
@@ -11,7 +11,6 @@
from ._helpers import construct_iso8601, process_row
from ._generated.models import (
- Column as InternalColumn,
BatchQueryRequest as InternalLogQueryRequest,
)
@@ -21,49 +20,33 @@ class LogsTable(object):
All required parameters must be populated in order to send to Azure.
- :param name: Required. The name of the table.
- :type name: str
- :param columns: Required. The list of columns in this table.
- :type columns: list[~azure.monitor.query.LogsTableColumn]
- :param rows: Required. The resulting rows from this query.
- :type rows: list[list[str]]
+ :ivar name: Required. The name of the table.
+ :vartype name: str
+ :ivar columns: The labels of columns in this table.
+ :vartype columns: list[str]
+ :ivar column_types: The types of columns in this table.
+    :vartype column_types: list[object]
+ :ivar rows: Required. The resulting rows from this query.
+ :vartype rows: list[list[object]]
"""
- def __init__(self, name, columns, rows):
- # type: (str, List[LogsTableColumn], List[List[str]]) -> None
- self.name = name
- self.columns = columns
- self.rows = [process_row(self.columns, row) for row in rows]
+ def __init__(self, **kwargs):
+ # type: (Any) -> None
+ self.name = kwargs.pop('name', None) # type: str
+        self.columns = kwargs.pop('columns', None) # type: Optional[List[str]]
+        self.columns_types = kwargs.pop('column_types', None) # type: Optional[List[Any]]
+ _rows = kwargs.pop('rows', None)
+ self.rows = [process_row(self.columns_types, row) for row in _rows]
@classmethod
def _from_generated(cls, generated):
return cls(
name=generated.name,
- columns=[LogsTableColumn(name=col.name, type=col.type) for col in generated.columns],
+ columns=[col.name for col in generated.columns],
+ column_types=[col.type for col in generated.columns],
rows=generated.rows
)
-class LogsTableColumn(InternalColumn):
- """A column in a table.
-
- :ivar name: The name of this column.
- :vartype name: str
- :ivar type: The data type of this column.
- :vartype type: str
- """
-
- _attribute_map = {
- "name": {"key": "name", "type": "str"},
- "type": {"key": "type", "type": "str"},
- }
-
- def __init__(self, **kwargs):
- # type: (Any) -> None
- super(LogsTableColumn, self).__init__(**kwargs)
- self.name = kwargs.get("name", None)
- self.type = kwargs.get("type", None)
-
-
class LogsQueryResult(object):
"""Contains the tables, columns & rows resulting from a query.
@@ -76,7 +59,7 @@ class LogsQueryResult(object):
visualization selected by the query and any properties for that visualization.
:vartype visualization: object
:ivar error: Any error info.
- :vartype error: object
+ :vartype error: ~azure.core.exceptions.HttpResponseError
"""
def __init__(self, **kwargs):
# type: (Any) -> None
@@ -124,7 +107,7 @@ class MetricsResult(object):
:ivar resource_region: The region of the resource that has been queried for metrics.
:vartype resource_region: str
:ivar metrics: Required. The value of the collection.
- :vartype metrics: list[~monitor_query_client.models.Metric]
+ :vartype metrics: list[~azure.monitor.query.Metric]
"""
def __init__(self, **kwargs):
# type: (Any) -> None
@@ -158,9 +141,9 @@ class LogsBatchQuery(object):
:param query: The Analytics query. Learn more about the `Analytics query syntax
`_.
:type query: str
- :param timespan: The timespan for which to query the data. This can be a timedelta,
+ :keyword timespan: The timespan for which to query the data. This can be a timedelta,
a timedelta and a start datetime, or a start datetime/end datetime.
- :type timespan: ~datetime.timedelta or tuple[~datetime.datetime, ~datetime.timedelta]
+ :paramtype timespan: ~datetime.timedelta or tuple[~datetime.datetime, ~datetime.timedelta]
or tuple[~datetime.datetime, ~datetime.datetime]
:keyword additional_workspaces: A list of workspaces that are included in the query.
These can be qualified workspace names, workspace Ids, or Azure resource Ids.
@@ -171,12 +154,12 @@ class LogsBatchQuery(object):
:keyword bool include_visualization: In the query language, it is possible to specify different
visualization options. By default, the API does not return information regarding the type of
visualization to show.
- :keyword headers: Dictionary of :code:``.
- :paramtype headers: dict[str, str]
"""
- def __init__(self, query, workspace_id, timespan, **kwargs): #pylint: disable=super-init-not-called
- # type: (str, str, Optional[str], Any) -> None
+ def __init__(self, workspace_id, query, **kwargs): #pylint: disable=super-init-not-called
+ # type: (str, str, Any) -> None
+ if 'timespan' not in kwargs:
+ raise TypeError("LogsBatchQuery() missing 1 required keyword-only argument: 'timespan'")
include_statistics = kwargs.pop("include_statistics", False)
include_visualization = kwargs.pop("include_visualization", False)
server_timeout = kwargs.pop("server_timeout", None)
@@ -192,12 +175,8 @@ def __init__(self, query, workspace_id, timespan, **kwargs): #pylint: disable=su
prefer += ","
prefer += "include-render=true"
- headers = kwargs.get("headers", None)
- try:
- headers['Prefer'] = prefer
- except TypeError:
- headers = {'Prefer': prefer}
- timespan = construct_iso8601(timespan)
+ headers = {'Prefer': prefer}
+ timespan = construct_iso8601(kwargs.pop('timespan'))
additional_workspaces = kwargs.pop("additional_workspaces", None)
self.id = str(uuid.uuid4())
self.body = {
@@ -230,7 +209,7 @@ class LogsBatchQueryResult(object):
visualization selected by the query and any properties for that visualization.
:vartype visualization: object
:ivar error: Any error info.
- :vartype error: object
+ :vartype error: ~azure.core.exceptions.HttpResponseError
"""
def __init__(
self,
@@ -276,16 +255,16 @@ class MetricNamespaceClassification(str, Enum):
class MetricNamespace(object):
"""Metric namespace class specifies the metadata for a metric namespace.
- :keyword id: The ID of the metricNamespace.
- :paramtype id: str
- :keyword type: The type of the namespace.
- :paramtype type: str
- :keyword name: The name of the namespace.
- :paramtype name: str
- :keyword fully_qualified_namespace: The fully qualified namespace name.
- :paramtype fully_qualified_namespace: str
- :keyword namespace_classification: Kind of namespace. Possible values include: "Platform", "Custom", "Qos".
- :paramtype namespace_classification: str or ~azure.monitor.query.MetricNamespaceClassification
+ :ivar id: The ID of the metricNamespace.
+ :vartype id: str
+ :ivar type: The type of the namespace.
+ :vartype type: str
+ :ivar name: The name of the namespace.
+ :vartype name: str
+ :ivar fully_qualified_namespace: The fully qualified namespace name.
+ :vartype fully_qualified_namespace: str
+ :ivar namespace_classification: Kind of namespace. Possible values include: "Platform", "Custom", "Qos".
+ :vartype namespace_classification: str or ~azure.monitor.query.MetricNamespaceClassification
"""
def __init__(
self,
@@ -327,35 +306,35 @@ class MetricClass(str, Enum):
class MetricDefinition(object): #pylint: disable=too-many-instance-attributes
"""Metric definition class specifies the metadata for a metric.
- :keyword dimension_required: Flag to indicate whether the dimension is required.
- :paramtype dimension_required: bool
- :keyword resource_id: the resource identifier of the resource that emitted the metric.
- :paramtype resource_id: str
- :keyword namespace: the namespace the metric belongs to.
- :paramtype namespace: str
- :keyword name: the name and the display name of the metric, i.e. it is a localizable string.
- :paramtype name: str
- :keyword unit: the unit of the metric. Possible values include: "Count", "Bytes", "Seconds",
+ :ivar dimension_required: Flag to indicate whether the dimension is required.
+ :vartype dimension_required: bool
+ :ivar resource_id: the resource identifier of the resource that emitted the metric.
+ :vartype resource_id: str
+ :ivar namespace: the namespace the metric belongs to.
+ :vartype namespace: str
+ :ivar name: the name and the display name of the metric, i.e. it is a localizable string.
+ :vartype name: str
+ :ivar unit: the unit of the metric. Possible values include: "Count", "Bytes", "Seconds",
"CountPerSecond", "BytesPerSecond", "Percent", "MilliSeconds", "ByteSeconds", "Unspecified",
"Cores", "MilliCores", "NanoCores", "BitsPerSecond".
- :paramtype unit: str or ~monitor_query_client.models.Unit
- :keyword primary_aggregation_type: the primary aggregation type value defining how to use the
+ :vartype unit: str or ~azure.monitor.query.MetricUnit
+ :ivar primary_aggregation_type: the primary aggregation type value defining how to use the
values for display. Possible values include: "None", "Average", "Count", "Minimum", "Maximum",
"Total".
- :paramtype primary_aggregation_type: str or ~azure.monitor.query.MetricAggregationType
- :keyword metric_class: The class of the metric. Possible values include: "Availability",
+ :vartype primary_aggregation_type: str or ~azure.monitor.query.MetricAggregationType
+ :ivar metric_class: The class of the metric. Possible values include: "Availability",
"Transactions", "Errors", "Latency", "Saturation".
- :paramtype metric_class: str or ~azure.monitor.query.MetricClass
- :keyword supported_aggregation_types: the collection of what aggregation types are supported.
- :paramtype supported_aggregation_types: list[str or ~azure.monitor.query.MetricAggregationType]
- :keyword metric_availabilities: the collection of what aggregation intervals are available to be
+ :vartype metric_class: str or ~azure.monitor.query.MetricClass
+ :ivar supported_aggregation_types: the collection of what aggregation types are supported.
+ :vartype supported_aggregation_types: list[str or ~azure.monitor.query.MetricAggregationType]
+ :ivar metric_availabilities: the collection of what aggregation intervals are available to be
queried.
- :paramtype metric_availabilities: list[~azure.monitor.query.MetricAvailability]
- :keyword id: the resource identifier of the metric definition.
- :paramtype id: str
- :keyword dimensions: the name and the display name of the dimension, i.e. it is a localizable
+ :vartype metric_availabilities: list[~azure.monitor.query.MetricAvailability]
+ :ivar id: the resource identifier of the metric definition.
+ :vartype id: str
+ :ivar dimensions: the name and the display name of the dimension, i.e. it is a localizable
string.
- :paramtype dimensions: list[str]
+ :vartype dimensions: list[str]
"""
def __init__(
self,
@@ -459,7 +438,7 @@ class Metric(object):
"Unspecified", "Cores", "MilliCores", "NanoCores", "BitsPerSecond".
:vartype unit: str
:ivar timeseries: Required. The time series returned when a data query is performed.
- :vartype timeseries: list[~monitor_query_client.models.TimeSeriesElement]
+ :vartype timeseries: list[~azure.monitor.query.TimeSeriesElement]
:ivar display_description: Detailed description of this metric.
:vartype display_description: str
"""
@@ -498,7 +477,7 @@ class TimeSeriesElement(object):
:vartype metadata_values: dict(str, str)
:ivar data: An array of data points representing the metric values. This is only returned if
a result type of data is specified.
- :vartype data: list[~monitor_query_client.models.MetricValue]
+ :vartype data: list[~azure.monitor.query.MetricValue]
"""
_attribute_map = {
@@ -530,12 +509,12 @@ class MetricAvailability(object):
"""Metric availability specifies the time grain (aggregation interval or frequency)
and the retention period for that time grain.
- :keyword granularity: the time grain specifies the aggregation interval for the metric. Expressed
+ :ivar granularity: the time grain specifies the aggregation interval for the metric. Expressed
as a duration 'PT1M', 'P1D', etc.
- :paramtype granularity: ~datetime.timedelta
- :keyword retention: the retention period for the metric at the specified timegrain. Expressed as
+ :vartype granularity: ~datetime.timedelta
+ :ivar retention: the retention period for the metric at the specified timegrain. Expressed as
a duration 'PT1M', 'P1D', etc.
- :paramtype retention: ~datetime.timedelta
+ :vartype retention: ~datetime.timedelta
"""
def __init__(
self,
@@ -565,3 +544,22 @@ class MetricAggregationType(str, Enum):
MINIMUM = "Minimum"
MAXIMUM = "Maximum"
TOTAL = "Total"
+
+
+class MetricUnit(str, Enum):
+ """The unit of the metric.
+ """
+
+ COUNT = "Count"
+ BYTES = "Bytes"
+ SECONDS = "Seconds"
+ COUNT_PER_SECOND = "CountPerSecond"
+ BYTES_PER_SECOND = "BytesPerSecond"
+ PERCENT = "Percent"
+ MILLI_SECONDS = "MilliSeconds"
+ BYTE_SECONDS = "ByteSeconds"
+ UNSPECIFIED = "Unspecified"
+ CORES = "Cores"
+ MILLI_CORES = "MilliCores"
+ NANO_CORES = "NanoCores"
+ BITS_PER_SECOND = "BitsPerSecond"
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_logs_query_client_async.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_logs_query_client_async.py
index deb3ba2a99f9..5039f62402f6 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_logs_query_client_async.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_logs_query_client_async.py
@@ -6,7 +6,7 @@
# --------------------------------------------------------------------------
from datetime import datetime, timedelta
-from typing import Any, Tuple, Union, Sequence, Dict, Optional, TYPE_CHECKING
+from typing import Any, Tuple, Union, Sequence, Dict, List, TYPE_CHECKING
from azure.core.exceptions import HttpResponseError
from azure.core.tracing.decorator_async import distributed_trace_async
@@ -45,7 +45,8 @@ async def query(
self,
workspace_id: str,
query: str,
- timespan: Optional[Union[timedelta, Tuple[datetime, timedelta], Tuple[datetime, datetime]]] = None,
+ *,
+ timespan: Union[timedelta, Tuple[datetime, timedelta], Tuple[datetime, datetime]],
**kwargs: Any) -> LogsQueryResult:
"""Execute an Analytics query.
@@ -114,7 +115,7 @@ async def query_batch(
self,
queries: Union[Sequence[Dict], Sequence[LogsBatchQuery]],
**kwargs: Any
- ) -> Sequence[LogsBatchQueryResult]:
+ ) -> List[LogsBatchQueryResult]:
"""Execute a list of analytics queries. Each request can be either a LogQueryRequest
object or an equivalent serialized model.
@@ -123,7 +124,7 @@ async def query_batch(
:param queries: The list of queries that should be processed
:type queries: list[dict] or list[~azure.monitor.query.LogsBatchQuery]
:return: list of LogsBatchQueryResult objects, or the result of cls(response)
- :rtype: ~list[~azure.monitor.query.LogsBatchQueryResult]
+ :rtype: list[~azure.monitor.query.LogsBatchQueryResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
try:
diff --git a/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_metrics_query_client_async.py b/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_metrics_query_client_async.py
index dfdc2906f38b..0d06cbe8a0b1 100644
--- a/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_metrics_query_client_async.py
+++ b/sdk/monitor/azure-monitor-query/azure/monitor/query/aio/_metrics_query_client_async.py
@@ -9,6 +9,7 @@
from datetime import timedelta
from typing import TYPE_CHECKING, Any, List, Optional
+from msrest.serialization import Serializer
from azure.core.async_paging import AsyncItemPaged
from azure.core.tracing.decorator import distributed_trace
@@ -88,9 +89,6 @@ async def query(
‘c1’**\ :code:`
`- Return all time series where A = a1:code:`
`\ **$filter=A eq ‘a1’ and
B eq ‘\ *’ and C eq ‘*\ ’**.
:paramtype filter: str
- :keyword result_type: Reduces the set of data collected. The syntax allowed depends on the
- operation. See the operation's description for details.
- :paramtype result_type: str or ~monitor_query_client.models.ResultType
:keyword metric_namespace: Metric namespace to query metric definitions for.
:paramtype metric_namespace: str
:return: Response, or the result of cls(response)
@@ -115,15 +113,19 @@ def list_metric_namespaces(self, resource_uri: str, **kwargs: Any) -> AsyncItemP
:param resource_uri: The identifier of the resource.
:type resource_uri: str
- :keyword start_time: The ISO 8601 conform Date start time from which to query for metric
- namespaces.
- :paramtype start_time: str
+ :keyword start_time: The start time from which to query for metric
+ namespaces. This should be provided as a datetime object.
+ :paramtype start_time: ~datetime.datetime
:return: An iterator like instance of either MetricNamespace or the result of cls(response)
- :rtype: ~azure.core.paging.ItemPaged[~azure.monitor.query.MetricNamespace]
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.monitor.query.MetricNamespace]
:raises: ~azure.core.exceptions.HttpResponseError
"""
+ start_time = kwargs.pop('start_time', None)
+ if start_time:
+ start_time = Serializer.serialize_iso(start_time)
return self._namespace_op.list(
resource_uri,
+ start_time,
cls=kwargs.pop(
"cls",
lambda objs: [
@@ -136,19 +138,19 @@ def list_metric_namespaces(self, resource_uri: str, **kwargs: Any) -> AsyncItemP
def list_metric_definitions(
self,
resource_uri: str,
- metric_namespace: str = None,
**kwargs: Any
) -> AsyncItemPaged[MetricDefinition]:
"""Lists the metric definitions for the resource.
:param resource_uri: The identifier of the resource.
:type resource_uri: str
- :param metric_namespace: Metric namespace to query metric definitions for.
- :type metric_namespace: str
+ :keyword namespace: Metric namespace to query metric definitions for.
+ :paramtype namespace: str
:return: An iterator like instance of either MetricDefinitionCollection or the result of cls(response)
- :rtype: ~azure.core.paging.ItemPaged[~azure.monitor.query.MetricDefinition]
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.monitor.query.MetricDefinition]
:raises: ~azure.core.exceptions.HttpResponseError
"""
+ metric_namespace = kwargs.pop('namespace', None)
return self._definitions_op.list(
resource_uri,
metric_namespace,
diff --git a/sdk/monitor/azure-monitor-query/samples/async_samples/sample_metrics_query_client_async.py b/sdk/monitor/azure-monitor-query/samples/async_samples/sample_metrics_query_client_async.py
index c4578b06747c..4b52d4186807 100644
--- a/sdk/monitor/azure-monitor-query/samples/async_samples/sample_metrics_query_client_async.py
+++ b/sdk/monitor/azure-monitor-query/samples/async_samples/sample_metrics_query_client_async.py
@@ -4,17 +4,13 @@
import os
import asyncio
from datetime import datetime, timedelta
-import urllib3
from azure.monitor.query.aio import MetricsQueryClient
+from azure.monitor.query import MetricAggregationType
from azure.identity.aio import DefaultAzureCredential
-urllib3.disable_warnings()
-
async def query_metrics():
credential = DefaultAzureCredential(
- client_id = os.environ['AZURE_CLIENT_ID'],
- client_secret = os.environ['AZURE_CLIENT_SECRET'],
- tenant_id = os.environ['AZURE_TENANT_ID']
+
)
client = MetricsQueryClient(credential)
@@ -23,16 +19,17 @@ async def query_metrics():
async with client:
response = await client.query(
metrics_uri,
- metric_names=["PublishSuccessCount"],
- start_time=datetime(2021, 5, 25),
- duration=timedelta(days=1)
+ metric_names=["Ingress"],
+ timespan=timedelta(hours=2),
+ granularity=timedelta(minutes=15),
+ aggregations=[MetricAggregationType.AVERAGE],
)
for metric in response.metrics:
print(metric.name)
for time_series_element in metric.timeseries:
for metric_value in time_series_element.data:
- print(metric_value.time_stamp)
+ print(metric_value.timestamp)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
diff --git a/sdk/monitor/azure-monitor-query/samples/champion_scenarios.md b/sdk/monitor/azure-monitor-query/samples/champion_scenarios.md
new file mode 100644
index 000000000000..d104a65ead8f
--- /dev/null
+++ b/sdk/monitor/azure-monitor-query/samples/champion_scenarios.md
@@ -0,0 +1,288 @@
+## Azure Monitor Query Champion Scenarios
+
+This document covers the basic champion scenarios for using the package.
+
+### Authenticate the client
+
+Consider the following example, which creates and authenticates clients for both logs and metrics querying:
+
+```python
+from azure.identity import DefaultAzureCredential
+from azure.monitor.query import LogsQueryClient, MetricsQueryClient
+
+credential = DefaultAzureCredential()
+logs_client = LogsQueryClient(credential)
+metrics_client = MetricsQueryClient(credential)
+```
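+
+The async clients under `azure.monitor.query.aio` follow the same pattern. A minimal sketch, assuming async credentials from `azure.identity.aio`:
+
+```python
+from azure.identity.aio import DefaultAzureCredential
+from azure.monitor.query.aio import LogsQueryClient, MetricsQueryClient
+
+async_credential = DefaultAzureCredential()
+async_logs_client = LogsQueryClient(async_credential)
+async_metrics_client = MetricsQueryClient(async_credential)
+```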
+
+### Make a simple query to the service
+
+* Each row is converted into a native Python data type. For example, a time value is returned as a datetime object instead of a string.
+
+#### Results in tabular form
+
+```python
+import os
+import pandas as pd
+from datetime import timedelta
+from azure.monitor.query import LogsQueryClient
+from azure.identity import DefaultAzureCredential
+
+credential = DefaultAzureCredential()
+client = LogsQueryClient(credential)
+
+def query():
+    query = """AppRequests |
+    summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""
+
+    response = client.query(os.environ['LOG_WORKSPACE_ID'], query,
+        timespan=timedelta(days=1))
+
+    if not response.tables:
+        return None
+
+    primary_table = response.tables[0]
+    df = pd.DataFrame(primary_table.rows, columns=primary_table.columns)
+    return df
+
+if __name__ == '__main__':
+ print(query())
+
+"""
+ TimeGenerated _ResourceId avgRequestDuration
+0 2021-05-27T08:40:00Z /subscriptions/... 27.307699999999997
+1 2021-05-27T08:50:00Z /subscriptions/... 18.11655
+2 2021-05-27T09:00:00Z /subscriptions/... 24.5271
+"""
+
+```
+
+#### Results in Key Value form
+
+```python
+import os
+import pandas as pd
+from datetime import timedelta
+from azure.monitor.query import LogsQueryClient
+from azure.identity import DefaultAzureCredential
+
+credential = DefaultAzureCredential()
+client = LogsQueryClient(credential)
+
+def query():
+    query = """AppRequests |
+    summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""
+
+    response = client.query(os.environ['LOG_WORKSPACE_ID'], query,
+        timespan=timedelta(days=1))
+
+    if not response.tables:
+        return None
+
+    primary_table = response.tables[0]
+    df = pd.DataFrame(primary_table.rows, columns=primary_table.columns)
+    return df.to_dict(orient='records')
+
+if __name__ == '__main__':
+ print(query())
+
+
+"""
+[
+ {
+ 'TimeGenerated': Timestamp('2021-08-24 01:10:00+0000'),
+ '_ResourceId': '/subscriptions/faa080af....',
+ 'avgRequestDuration': 19.7987
+ },
+ {
+ 'TimeGenerated': Timestamp('2021-08-24 01:10:00+0000'),
+ '_ResourceId': '/subscriptions/faa08....',
+ 'avgRequestDuration': 33.9654
+ },
+ {
+ 'TimeGenerated': Timestamp('2021-08-24 01:10:00+0000'),
+ '_ResourceId': '/subscriptions/faa080....',
+ 'avgRequestDuration': 44.13115
+ }
+]
+"""
+
+```
+
+### Run multiple queries in one API call
+
+* `query_batch` returns the results as a list, in the same order in which the requests were sent.
+* Each item in the result has a non-None `error` attribute if that query failed.
+
+#### Results in tabular form
+
+```python
+from datetime import datetime, timedelta
+import os
+import pandas as pd
+from azure.monitor.query import LogsQueryClient, LogsBatchQuery
+from azure.identity import DefaultAzureCredential
+
+
+credential = DefaultAzureCredential()
+
+client = LogsQueryClient(credential)
+
+requests = [
+ LogsBatchQuery(
+ query="AzureActivity | summarize count()",
+ timespan=timedelta(hours=1),
+ workspace_id= os.environ['LOG_WORKSPACE_ID']
+ ),
+ LogsBatchQuery(
+ query= """AppRequests | take 5 |
+ summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId""",
+ timespan=(datetime(2021, 6, 2), timedelta(hours=1)),
+ workspace_id= os.environ['LOG_WORKSPACE_ID']
+ ),
+ LogsBatchQuery(
+ query= """AppRequests | take 5 |
+ summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId""",
+ workspace_id= os.environ['LOG_WORKSPACE_ID'],
+ timespan=(datetime(2021, 6, 2), datetime(2021, 6, 3)),
+ include_statistics=True
+ ),
+]
+results = client.query_batch(requests)
+
+for response in results:
+    if response.error is not None:
+        print(response.error.innererror)
+        continue
+
+    table = response.tables[0]
+ df = pd.DataFrame(table.rows, columns=table.columns)
+ print(df)
+ print("\n\n-------------------------\n\n")
+
+"""
+ count_
+0 2
+
+
+-------------------------
+
+
+ TimeGenerated _ResourceId avgRequestDuration
+0 2021-06-02 00:20:00+00:00 /subscriptions/... 18.12380
+1 2021-06-02 00:00:00+00:00 /subscriptions/... 20.84805
+2 2021-06-02 00:10:00+00:00 /subscriptions/... 19.72410
+3 2021-06-02 00:30:00+00:00 /subscriptions/... 19.41265
+4 2021-06-02 00:40:00+00:00 /subscriptions/... 19.17145
+
+
+-------------------------
+
+
+
+ TimeGenerated _ResourceId avgRequestDuration
+0 2021-06-02 00:20:00+00:00 /subscriptions/... 18.12380
+1 2021-06-02 00:00:00+00:00 /subscriptions/... 20.84805
+2 2021-06-02 00:10:00+00:00 /subscriptions/... 19.72410
+3 2021-06-02 00:30:00+00:00 /subscriptions/... 19.41265
+4 2021-06-02 00:40:00+00:00 /subscriptions/... 19.17145
+
+
+
+-------------------------
+"""
+```
+
+#### Results in Key Value form
+
+
+Very similar to the above, except that the DataFrame is converted to a list of dictionaries:
+
+```python
+for response in results:
+    if response.error is not None:
+        print(response.error.innererror)
+        continue
+
+    table = response.tables[0]
+ df = pd.DataFrame(table.rows, columns=table.columns)
+ print(df.to_dict(orient='records'))
+ print("\n\n-------------------------\n\n")
+```
+
+### Run a complex query that sets the server timeout to more than 3 minutes
+
+```python
+import os
+import pandas as pd
+from azure.core.serialization import NULL
+from azure.monitor.query import LogsQueryClient
+from azure.identity import DefaultAzureCredential
+
+
+credential = DefaultAzureCredential()
+
+client = LogsQueryClient(credential)
+
+response = client.query(
+ os.environ['LOG_WORKSPACE_ID'],
+ "range x from 1 to 10000000000 step 1 | count",
+ timespan=NULL, # can pass None too
+ server_timeout=600
+ )
+
+# Note: a query this large may still result in a server timeout.
+```
+
+### Run a metrics query
+
+```python
+import os
+from datetime import timedelta
+from azure.monitor.query import MetricsQueryClient, MetricAggregationType
+from azure.identity import DefaultAzureCredential
+
+credential = DefaultAzureCredential()
+
+client = MetricsQueryClient(credential)
+
+metrics_uri = os.environ['METRICS_RESOURCE_URI']
+response = client.query(
+ metrics_uri,
+ metric_names=["Ingress"],
+ timespan=timedelta(hours=2),
+ granularity=timedelta(minutes=5),
+ aggregations=[MetricAggregationType.AVERAGE],
+ )
+
+for metric in response.metrics:
+ print(metric.name + ' -- ' + metric.display_description)
+ for time_series_element in metric.timeseries:
+ for metric_value in time_series_element.data:
+ print('The ingress at {} is {}'.format(
+ metric_value.timestamp,
+ metric_value.average
+ ))
+
+"""
+Ingress -- The amount of ingress data, in bytes. This number includes ingress from an external client into Azure Storage as well as ingress within Azure.
+The ingress at 2021-08-23 23:58:00+00:00 is 567.4285714285714
+The ingress at 2021-08-24 00:03:00+00:00 is 812.0
+The ingress at 2021-08-24 00:08:00+00:00 is 812.0
+The ingress at 2021-08-24 00:13:00+00:00 is 812.0
+The ingress at 2021-08-24 00:18:00+00:00 is 812.0
+The ingress at 2021-08-24 00:23:00+00:00 is 3623.3333333333335
+The ingress at 2021-08-24 00:28:00+00:00 is 1082.75
+The ingress at 2021-08-24 00:33:00+00:00 is 1160.6666666666667
+The ingress at 2021-08-24 00:38:00+00:00 is 1060.75
+The ingress at 2021-08-24 00:43:00+00:00 is 1081.75
+The ingress at 2021-08-24 00:48:00+00:00 is 1061.25
+The ingress at 2021-08-24 00:53:00+00:00 is 1160.3333333333333
+The ingress at 2021-08-24 00:58:00+00:00 is 1082.0
+The ingress at 2021-08-24 01:03:00+00:00 is 1628.6666666666667
+The ingress at 2021-08-24 01:08:00+00:00 is 794.6666666666666
+The ingress at 2021-08-24 01:13:00+00:00 is 1060.25
+The ingress at 2021-08-24 01:18:00+00:00 is 1160.0
+The ingress at 2021-08-24 01:23:00+00:00 is 1082.0
+The ingress at 2021-08-24 01:28:00+00:00 is 1060.5
+The ingress at 2021-08-24 01:33:00+00:00 is 1630.0
+The ingress at 2021-08-24 01:38:00+00:00 is 795.0
+The ingress at 2021-08-24 01:43:00+00:00 is 827.6
+The ingress at 2021-08-24 01:48:00+00:00 is 1250.5
+The ingress at 2021-08-24 01:53:00+00:00 is 1061.75
+"""
+```
\ No newline at end of file
diff --git a/sdk/monitor/azure-monitor-query/samples/sample_batch_query.py b/sdk/monitor/azure-monitor-query/samples/sample_batch_query.py
index c7e1ec025b93..3705d94cf494 100644
--- a/sdk/monitor/azure-monitor-query/samples/sample_batch_query.py
+++ b/sdk/monitor/azure-monitor-query/samples/sample_batch_query.py
@@ -16,19 +16,19 @@
requests = [
LogsBatchQuery(
query="AzureActivity | summarize count()",
- duration=timedelta(hours=1),
+ timespan=timedelta(hours=1),
workspace_id= os.environ['LOG_WORKSPACE_ID']
),
LogsBatchQuery(
query= """AppRequests | take 10 |
summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId""",
- duration=timedelta(hours=1),
- start_time=datetime(2021, 6, 2),
+ timespan=(datetime(2021, 6, 2), timedelta(hours=1)),
workspace_id= os.environ['LOG_WORKSPACE_ID']
),
LogsBatchQuery(
- query= "AppRequestss | take 5",
+ query= "AppRequests | take 5",
workspace_id= os.environ['LOG_WORKSPACE_ID'],
+ timespan=(datetime(2021, 6, 2), datetime(2021, 6, 3)),
include_statistics=True
),
]
@@ -39,7 +39,8 @@
table = response.tables[0]
-        df = pd.DataFrame(table.rows, columns=[col.name for col in table.columns])
+        df = pd.DataFrame(table.rows, columns=table.columns)
print(df)
+ print("\n\n-------------------------\n\n")
except TypeError:
- print(response.error)
+ print(response.error.innererror)
# [END send_query_batch]
\ No newline at end of file
diff --git a/sdk/monitor/azure-monitor-query/samples/sample_log_query_client.py b/sdk/monitor/azure-monitor-query/samples/sample_log_query_client.py
index fd2d13a4b891..917f6a16e698 100644
--- a/sdk/monitor/azure-monitor-query/samples/sample_log_query_client.py
+++ b/sdk/monitor/azure-monitor-query/samples/sample_log_query_client.py
@@ -3,8 +3,7 @@
import os
import pandas as pd
-from datetime import datetime, timedelta
-from msrest.serialization import UTC
+from datetime import timedelta
from azure.monitor.query import LogsQueryClient
from azure.identity import DefaultAzureCredential
@@ -20,13 +19,6 @@
query = """AppRequests |
summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""
-query = """
-AppRequests
-| where TimeGenerated > ago(1h)
-| fork
- ( summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId )
-"""
-
# returns LogsQueryResult
response = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=timedelta(days=1))
@@ -35,8 +27,7 @@
for table in response.tables:
try:
- print ([col.name for col in table.columns])
- df = pd.DataFrame(table.rows, columns=[col.name for col in table.columns])
+ df = pd.DataFrame(table.rows, columns=table.columns)
print(df)
except TypeError:
print(response.error)
diff --git a/sdk/monitor/azure-monitor-query/samples/sample_metrics_query_client.py b/sdk/monitor/azure-monitor-query/samples/sample_metrics_query_client.py
index 224d7f05e8ea..4027ddd1b308 100644
--- a/sdk/monitor/azure-monitor-query/samples/sample_metrics_query_client.py
+++ b/sdk/monitor/azure-monitor-query/samples/sample_metrics_query_client.py
@@ -7,7 +7,7 @@
from azure.monitor.query import MetricsQueryClient, MetricAggregationType
from azure.identity import DefaultAzureCredential
-urllib3.disable_warnings()
+# urllib3.disable_warnings()
# [START metrics_client_auth_with_token_cred]
credential = DefaultAzureCredential()
@@ -19,20 +19,18 @@
metrics_uri = os.environ['METRICS_RESOURCE_URI']
response = client.query(
metrics_uri,
- metric_names=["MatchedEventCount", "DeliverySuccesssCount"],
- timespan=timedelta(days=1),
- aggregations=[MetricAggregationType.COUNT]
+ metric_names=["Ingress"],
+ timespan=timedelta(hours=2),
+ granularity=timedelta(minutes=5),
+ aggregations=[MetricAggregationType.AVERAGE],
)
for metric in response.metrics:
- print(metric.name)
+ print(metric.name + ' -- ' + metric.display_description)
for time_series_element in metric.timeseries:
for metric_value in time_series_element.data:
- if metric_value.count != 0:
- print(
- "There are {} matched events at {}".format(
- metric_value.count,
- metric_value.time_stamp
- )
- )
+ print('The ingress at {} is {}'.format(
+ metric_value.timestamp,
+ metric_value.average
+ ))
# [END send_metrics_query]
diff --git a/sdk/monitor/azure-monitor-query/tests/async/test_logs_client_async.py b/sdk/monitor/azure-monitor-query/tests/async/test_logs_client_async.py
index 6c9ea9f6fd13..fb49883cff6d 100644
--- a/sdk/monitor/azure-monitor-query/tests/async/test_logs_client_async.py
+++ b/sdk/monitor/azure-monitor-query/tests/async/test_logs_client_async.py
@@ -16,6 +16,7 @@ def _credential():
return credential
@pytest.mark.live_test_only
+@pytest.mark.asyncio
async def test_logs_auth():
credential = _credential()
client = LogsQueryClient(credential)
@@ -29,8 +30,23 @@ async def test_logs_auth():
assert response is not None
assert response.tables is not None
+@pytest.mark.live_test_only
+@pytest.mark.asyncio
+async def test_logs_auth_no_timespan():
+ credential = _credential()
+ client = LogsQueryClient(credential)
+ query = """AppRequests |
+ where TimeGenerated > ago(12h) |
+ summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""
+
+ # returns LogsQueryResult
+ with pytest.raises(TypeError):
+ await client.query(os.environ['LOG_WORKSPACE_ID'], query)
+
+
@pytest.mark.skip("https://github.com/Azure/azure-sdk-for-python/issues/19917")
@pytest.mark.live_test_only
+@pytest.mark.asyncio
async def test_logs_server_timeout():
client = LogsQueryClient(_credential())
with pytest.raises(HttpResponseError) as e:
@@ -43,6 +59,16 @@ async def test_logs_server_timeout():
assert e.message.contains('Gateway timeout')
@pytest.mark.live_test_only
+@pytest.mark.asyncio
+async def test_logs_query_batch_raises_on_no_timespan():
+ with pytest.raises(TypeError):
+ LogsBatchQuery(
+ query="AzureActivity | summarize count()",
+ workspace_id= os.environ['LOG_WORKSPACE_ID']
+ )
+
+@pytest.mark.live_test_only
+@pytest.mark.asyncio
async def test_logs_query_batch():
client = LogsQueryClient(_credential())
diff --git a/sdk/monitor/azure-monitor-query/tests/async/test_metrics_client_async.py b/sdk/monitor/azure-monitor-query/tests/async/test_metrics_client_async.py
index e205e67734e2..ae9ba2f52b2d 100644
--- a/sdk/monitor/azure-monitor-query/tests/async/test_metrics_client_async.py
+++ b/sdk/monitor/azure-monitor-query/tests/async/test_metrics_client_async.py
@@ -43,17 +43,21 @@ async def test_metrics_granularity():
assert response.granularity == timedelta(minutes=5)
@pytest.mark.live_test_only
+@pytest.mark.asyncio
async def test_metrics_namespaces():
client = MetricsQueryClient(_credential())
- response = await client.list_metric_namespaces(os.environ['METRICS_RESOURCE_URI'])
+ async with client:
+ response = client.list_metric_namespaces(os.environ['METRICS_RESOURCE_URI'])
- assert response is not None
+ assert response is not None
@pytest.mark.live_test_only
+@pytest.mark.asyncio
async def test_metrics_definitions():
client = MetricsQueryClient(_credential())
- response = await client.list_metric_definitions(os.environ['METRICS_RESOURCE_URI'], metric_namespace='microsoft.eventgrid/topics')
+ async with client:
+ response = client.list_metric_definitions(os.environ['METRICS_RESOURCE_URI'], namespace='microsoft.eventgrid/topics')
- assert response is not None
+ assert response is not None
diff --git a/sdk/monitor/azure-monitor-query/tests/test_logs_client.py b/sdk/monitor/azure-monitor-query/tests/test_logs_client.py
index e9e91b316dcc..dba61f5471d1 100644
--- a/sdk/monitor/azure-monitor-query/tests/test_logs_client.py
+++ b/sdk/monitor/azure-monitor-query/tests/test_logs_client.py
@@ -22,11 +22,23 @@ def test_logs_single_query():
summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""
# returns LogsQueryResult
- response = client.query(os.environ['LOG_WORKSPACE_ID'], query)
+ response = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)
assert response is not None
assert response.tables is not None
+@pytest.mark.live_test_only
+def test_logs_single_query_raises_no_timespan():
+ credential = _credential()
+ client = LogsQueryClient(credential)
+ query = """AppRequests |
+ where TimeGenerated > ago(12h) |
+ summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""
+
+ # returns LogsQueryResult
+ with pytest.raises(TypeError):
+ client.query(os.environ['LOG_WORKSPACE_ID'], query)
+
@pytest.mark.live_test_only
def test_logs_single_query_with_non_200():
credential = _credential()
@@ -107,7 +119,7 @@ def test_logs_single_query_with_render():
query = """AppRequests"""
# returns LogsQueryResult
- response = client.query(os.environ['LOG_WORKSPACE_ID'], query, include_visualization=True)
+ response = client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None, include_visualization=True)
assert response.visualization is not None
diff --git a/sdk/monitor/azure-monitor-query/tests/test_metrics_client.py b/sdk/monitor/azure-monitor-query/tests/test_metrics_client.py
index ecdb7e7c8a3a..17f61ccb812b 100644
--- a/sdk/monitor/azure-monitor-query/tests/test_metrics_client.py
+++ b/sdk/monitor/azure-monitor-query/tests/test_metrics_client.py
@@ -51,6 +51,6 @@ def test_metrics_namespaces():
def test_metrics_definitions():
client = MetricsQueryClient(_credential())
- response = client.list_metric_definitions(os.environ['METRICS_RESOURCE_URI'], metric_namespace='microsoft.eventgrid/topics')
+ response = client.list_metric_definitions(os.environ['METRICS_RESOURCE_URI'], namespace='microsoft.eventgrid/topics')
assert response is not None