
Query - samples + README + docstrings (Azure#20869)
* Samples fix

* README

* docstrings

* mypy

* lint

* test fix

* lint
Rakshith Bhyravabhotla authored Sep 27, 2021
1 parent 559840c commit 883bdae
Showing 28 changed files with 671 additions and 427 deletions.
153 changes: 101 additions & 52 deletions sdk/monitor/azure-monitor-query/README.md
@@ -106,68 +106,83 @@
The `timespan` parameter specifies the time duration for which to query the data.
```python
import os
import pandas as pd
from datetime import datetime, timezone
from azure.core.exceptions import HttpResponseError
from azure.monitor.query import LogsQueryClient, LogsQueryStatus
from azure.identity import DefaultAzureCredential

credential = DefaultAzureCredential()
client = LogsQueryClient(credential)

query = """AppRequests | take 5"""

start_time = datetime(2021, 7, 2, tzinfo=timezone.utc)
end_time = datetime(2021, 7, 4, tzinfo=timezone.utc)

try:
    response = client.query_workspace(
        workspace_id=os.environ['LOG_WORKSPACE_ID'],
        query=query,
        timespan=(start_time, end_time)
    )
    if response.status == LogsQueryStatus.PARTIAL:
        # a partial success: some data came back along with an error
        error = response.partial_error
        data = response.partial_data
        print(error.message)
    elif response.status == LogsQueryStatus.SUCCESS:
        data = response.tables
    for table in data:
        df = pd.DataFrame(data=table.rows, columns=table.columns)
        print(df)
except HttpResponseError as err:
    print("something fatal happened")
    print(err)
```
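The `timespan` argument is not limited to a `(start, end)` pair of datetimes; as the batch example later in this README shows, it also accepts a plain `timedelta`, or a `(start datetime, duration timedelta)` tuple. A sketch of the variants, reusing the client and query above:

```python
from datetime import datetime, timedelta, timezone

# last hour, relative to now
response = client.query_workspace(
    os.environ['LOG_WORKSPACE_ID'], query, timespan=timedelta(hours=1)
)

# one day starting at an explicit UTC datetime
response = client.query_workspace(
    os.environ['LOG_WORKSPACE_ID'],
    query,
    timespan=(datetime(2021, 7, 2, tzinfo=timezone.utc), timedelta(days=1)),
)
```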

#### Handle logs query response

The `query_workspace` API returns a union of `LogsQueryResult` and `LogsQueryPartialResult`, while the `query_batch` API returns a list of `LogsQueryResult`, `LogsQueryPartialResult`, and `LogsQueryError` objects. Here's a hierarchy of the response:

```
LogsQueryResult
|---statistics
|---visualization
|---tables (list of `LogsTable` objects)
    |---name
    |---rows
    |---columns
    |---column_types
LogsQueryPartialResult
|---statistics
|---visualization
|---partial_error (a `LogsQueryError` object)
|---partial_data (list of `LogsTable` objects)
    |---name
    |---rows
    |---columns
    |---column_types
```
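Either shape can also be distinguished with an `isinstance` check instead of the `status` enum (a sketch; `partial_error` and `partial_data` are the attributes shown in the hierarchy above, and `response` is the result of the earlier query):

```python
from azure.monitor.query import LogsQueryPartialResult

if isinstance(response, LogsQueryPartialResult):
    # partial success: inspect the error, then use whatever data came back
    print(response.partial_error.message)
    tables = response.partial_data
else:
    tables = response.tables
```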

As a convenience, `LogsQueryResult` iterates directly over its tables.
For example, to handle a logs query response with tables and display it using pandas:

```python
response = client.query_workspace(...)
for table in response:
    df = pd.DataFrame(table.rows, columns=table.columns)
```
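The same iteration also supports a plain key-value view without pandas (a sketch, assuming `table.columns` is a list of column-name strings as shown in the hierarchy above):

```python
for table in response:
    for row in table.rows:
        # pair each value with its column name
        print(dict(zip(table.columns, row)))
```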

A full sample can be found [here](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/monitor/azure-monitor-query/samples/sample_logs_single_query.py).

In a similar fashion, to handle a batch logs query response:

```python
for result in response:
    if result.status == LogsQueryStatus.SUCCESS:
        for table in result:
            df = pd.DataFrame(table.rows, columns=table.columns)
            print(df)
```

A full sample can be found [here](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/monitor/azure-monitor-query/samples/sample_batch_query.py).
@@ -178,41 +193,51 @@
The following example demonstrates sending multiple queries at the same time using the batch query API.

```python
import os
from datetime import timedelta, datetime, timezone
import pandas as pd
from azure.monitor.query import LogsQueryClient, LogsBatchQuery, LogsQueryStatus
from azure.identity import DefaultAzureCredential

credential = DefaultAzureCredential()
client = LogsQueryClient(credential)

requests = [
    LogsBatchQuery(
        query="AzureActivity | summarize count()",
        timespan=timedelta(hours=1),
        workspace_id=os.environ['LOG_WORKSPACE_ID']
    ),
    LogsBatchQuery(
        query="""bad query""",
        timespan=timedelta(days=1),
        workspace_id=os.environ['LOG_WORKSPACE_ID']
    ),
    LogsBatchQuery(
        query="""let Weight = 92233720368547758;
        range x from 1 to 3 step 1
        | summarize percentilesw(x, Weight * 100, 50)""",
        workspace_id=os.environ['LOG_WORKSPACE_ID'],
        timespan=(datetime(2021, 6, 2, tzinfo=timezone.utc), datetime(2021, 6, 5, tzinfo=timezone.utc)),  # (start, end)
        include_statistics=True
    ),
]

results = client.query_batch(requests)

for res in results:
    if res.status == LogsQueryStatus.FAILURE:
        # this will be a LogsQueryError
        print(res.message)
    elif res.status == LogsQueryStatus.PARTIAL:
        # this will be a LogsQueryPartialResult
        print(res.partial_error.message)
        for table in res.partial_data:
            df = pd.DataFrame(table.rows, columns=table.columns)
            print(df)
    elif res.status == LogsQueryStatus.SUCCESS:
        # this will be a LogsQueryResult
        table = res.tables[0]
        df = pd.DataFrame(table.rows, columns=table.columns)
        print(df)
```
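The third request sets `include_statistics=True`. The response hierarchy above lists a `statistics` attribute on the result, which could be inspected once the batch returns (a sketch):

```python
for res in results:
    if res.status == LogsQueryStatus.SUCCESS and res.statistics:
        # raw query-execution statistics returned by the service
        print(res.statistics)
```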

### Advanced logs query scenarios
@@ -233,6 +258,7 @@
```python
client = LogsQueryClient(credential)

response = client.query_workspace(
    os.environ['LOG_WORKSPACE_ID'],
    "range x from 1 to 10000000000 step 1 | count",
    timespan=None,
    server_timeout=1,
)
```
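A query that exceeds `server_timeout` may surface as an error from the service; a defensive sketch (assuming the deliberately small one-second timeout above):

```python
from azure.core.exceptions import HttpResponseError

try:
    response = client.query_workspace(
        os.environ['LOG_WORKSPACE_ID'],
        "range x from 1 to 10000000000 step 1 | count",
        timespan=None,
        server_timeout=1,  # seconds
    )
except HttpResponseError as err:
    print(err)
```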
@@ -271,9 +297,11 @@
To find the resource URI:
2. From the **Overview** blade, select the **JSON View** link.
3. In the resulting JSON, copy the value of the `id` property.

**NOTE**: The metrics are returned in the order of the `metric_names` sent.

```python
import os
from datetime import timedelta, datetime
from azure.monitor.query import MetricsQueryClient
from azure.identity import DefaultAzureCredential
```
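The diff folds the body of this example. A minimal sketch of how a metrics query might continue from these imports, assuming this release's `MetricsQueryClient.query` method, a hypothetical `METRICS_RESOURCE_URI` environment variable, and a made-up metric name:

```python
credential = DefaultAzureCredential()
client = MetricsQueryClient(credential)

# The resource URI is the full Azure resource ID copied from the JSON view.
metrics_uri = os.environ['METRICS_RESOURCE_URI']

response = client.query(
    metrics_uri,
    metric_names=["Ingress"],  # hypothetical metric name
    timespan=timedelta(hours=2)
)

for metric in response.metrics:
    print(metric.name)
    for time_series_element in metric.timeseries:
        for metric_value in time_series_element.data:
            print(metric_value.time_stamp)
```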
@@ -367,6 +395,27 @@
Optional keyword arguments can be passed in at the client and per-operation level.

To learn more about Azure Monitor, see the [Azure Monitor service documentation][azure_monitor_overview].

### Samples
These code samples show common champion scenario operations with the Azure Monitor Query client library.

* Send a single query with LogsQueryClient and handle the response as a table: [sample_logs_single_query.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/monitor/azure-monitor-query/samples/sample_logs_single_query.py) ([async_sample](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/monitor/azure-monitor-query/samples/async_samples/sample_log_query_async.py))

* Send a single query with LogsQueryClient and handle the response in key value form: [sample_logs_query_key_value_form.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/monitor/azure-monitor-query/samples/sample_logs_query_key_value_form.py)

* Send a single query with LogsQueryClient without pandas: [sample_single_log_query_without_pandas.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/monitor/azure-monitor-query/samples/sample_single_log_query_without_pandas.py)

* Send a single query with LogsQueryClient across multiple workspaces: [sample_logs_query_multiple_workspaces.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/monitor/azure-monitor-query/samples/sample_log_query_multiple_workspaces.py)

* Send multiple queries with LogsQueryClient: [sample_batch_query.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/monitor/azure-monitor-query/samples/sample_batch_query.py)

* Send a single query with LogsQueryClient using server timeout: [sample_server_timeout.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/monitor/azure-monitor-query/samples/sample_server_timeout.py)

* Send a query using MetricsQueryClient: [sample_metrics_query.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/monitor/azure-monitor-query/samples/sample_metrics_query.py) ([async_sample](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/monitor/azure-monitor-query/samples/async_samples/sample_metrics_query_async.py))

* Get a list of metric namespaces: [sample_metric_namespaces.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/monitor/azure-monitor-query/samples/sample_metric_namespaces.py) ([async_sample](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/monitor/azure-monitor-query/samples/async_samples/sample_metric_namespaces_async.py))

* Get a list of metric definitions: [sample_metric_definitions.py](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/monitor/azure-monitor-query/samples/sample_metric_definitions.py) ([async_sample](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/monitor/azure-monitor-query/samples/async_samples/sample_metric_definitions_async.py))

## Contributing

This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit [cla.microsoft.com][cla].
@@ -10,8 +10,6 @@
class LogsQueryError(object):
    """The code and message for an error.

    :ivar code: A machine readable error code.
    :vartype code: str
    :ivar message: A human readable error message.
@@ -5,7 +5,7 @@
# license information.
# --------------------------------------------------------------------------
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, List, Dict, Any
from msrest import Serializer, Deserializer
from azure.core.exceptions import HttpResponseError
from azure.core.pipeline.policies import BearerTokenCredentialPolicy
@@ -47,6 +47,7 @@ def get_metrics_authentication_policy(


def order_results(request_order, mapping, **kwargs):
    # type: (List, Dict, Any) -> List
    ordered = [mapping[id] for id in request_order]
    results = []
    for item in ordered:
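The diff folds the remainder of this helper, which wraps each ordered item into a result or error object. The reordering idea in isolation looks like this (a toy sketch, not the SDK's code):

```python
def reorder(request_order, mapping):
    # Batch responses arrive keyed by request id, possibly out of order;
    # return them in the order the requests were sent.
    return [mapping[request_id] for request_id in request_order]

assert reorder(["a", "b"], {"b": 2, "a": 1}) == [1, 2]
```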
@@ -28,11 +28,17 @@


class LogsQueryClient(object):
    """LogsQueryClient. Use this client to collect and organize log and performance data from
    monitored resources. Data from different sources, such as platform logs from Azure services,
    log and performance data from virtual machine agents, and usage and performance data from
    apps, can be consolidated into a single Azure Log Analytics workspace.

    The various data types can be analyzed together using the
    [Kusto Query Language](https://docs.microsoft.com/azure/data-explorer/kusto/query/).

    .. admonition:: Example:

        .. literalinclude:: ../samples/sample_single_logs_query.py
            :start-after: [START client_auth_with_token_cred]
            :end-before: [END client_auth_with_token_cred]
            :language: python
@@ -83,13 +89,13 @@ def query_workspace(self, workspace_id, query, **kwargs):
        :keyword additional_workspaces: A list of workspaces that are included in the query.
         These can be qualified workspace names, workspace IDs, or Azure resource IDs.
        :paramtype additional_workspaces: list[str]
        :return: LogsQueryResult if there is a success, or LogsQueryPartialResult when there is a partial success.
        :rtype: Union[~azure.monitor.query.LogsQueryResult, ~azure.monitor.query.LogsQueryPartialResult]
        :raises: ~azure.core.exceptions.HttpResponseError

        .. admonition:: Example:

            .. literalinclude:: ../samples/sample_single_logs_query.py
                :start-after: [START send_logs_query]
                :end-before: [END send_logs_query]
                :language: python
@@ -131,7 +137,7 @@ def query_workspace(self, workspace_id, query, **kwargs):
            response = LogsQueryPartialResult._from_generated(  # pylint: disable=protected-access
                generated_response, LogsQueryError
            )
        return cast(Union[LogsQueryResult, LogsQueryPartialResult], response)

    @distributed_trace
    def query_batch(
@@ -140,14 +146,17 @@ def query_batch(
        **kwargs  # type: Any
    ):
        # type: (...) -> List[Union[LogsQueryResult, LogsQueryPartialResult, LogsQueryError]]
"""Execute a list of analytics queries. Each request can be either a LogQueryRequest
"""Execute a list of analytics queries. Each request can be either a LogsBatchQuery
object or an equivalent serialized model.
The response is returned in the same order as that of the requests sent.
**NOTE**: The response is returned in the same order as that of the requests sent.
:param queries: The list of Kusto queries to execute.
:type queries: list[dict] or list[~azure.monitor.query.LogsBatchQuery]
:return: List of LogsQueryResult, or the result of cls(response)
:return: List of LogsQueryResult, LogsQueryPartialResult and LogsQueryError.
For a given query, a LogsQueryResult is returned if the response is a success, LogsQueryPartialResult
is returned when there is a partial success and a LogsQueryError is returned when there is a failure.
The status of each response can be checked using `LogsQueryStatus` enum.
:rtype: list[Union[~azure.monitor.query.LogsQueryResult, ~azure.monitor.query.LogsQueryPartialResult,
~azure.monitor.query.LogsQueryError]
:raises: ~azure.core.exceptions.HttpResponseError
@@ -162,7 +171,7 @@ def query_batch(
                :caption: Get a response for multiple Log Queries.
        """
        try:
            queries = [LogsBatchQuery(**cast(Dict, q)) for q in queries]
        except (KeyError, TypeError):
            pass
        queries = [
@@ -174,7 +183,7 @@ def query_batch(
        request_order = [req["id"] for req in queries]
        batch = BatchRequest(requests=queries)
        generated = self._query_op.batch(batch, **kwargs)
        mapping = {item.id: item for item in generated.responses}  # type: ignore
        return order_results(
            request_order,
            mapping,