Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[AutoPR datalake-analytics/data-plane] [ADLA] - Catalog - Add stream path to USqlTableFragment #4490

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions azure-mgmt-datalake-analytics/MANIFEST.in
Original file line number Diff line number Diff line change
@@ -1 +1,6 @@
recursive-include tests *.py *.yaml
include *.rst
include azure/__init__.py
include azure/mgmt/__init__.py
include azure/mgmt/datalake/__init__.py

19 changes: 0 additions & 19 deletions azure-mgmt-datalake-analytics/README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -14,25 +14,6 @@ For the older Azure Service Management (ASM) libraries, see
For a more complete set of Azure libraries, see the `azure <https://pypi.python.org/pypi/azure>`__ bundle package.


Compatibility
=============

**IMPORTANT**: If you have an earlier version of the azure package
(version < 1.0), you should uninstall it before installing this package.

You can check the version using pip:

.. code:: shell

pip freeze

If you see azure==0.11.0 (or any version below 1.0), uninstall it first:

.. code:: shell

pip uninstall azure


Usage
=====

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ class USqlIndex(Model):
~azure.mgmt.datalake.analytics.catalog.models.USqlDistributionInfo
:param partition_function: partition function ID for the index.
:type partition_function: str
:param partition_key_list: the list of partion keys in the index
:param partition_key_list: the list of partition keys in the index
:type partition_key_list: list[str]
:param stream_names: the list of full paths to the streams that contain
this index in the DataLake account.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ class USqlIndex(Model):
~azure.mgmt.datalake.analytics.catalog.models.USqlDistributionInfo
:param partition_function: partition function ID for the index.
:type partition_function: str
:param partition_key_list: the list of partion keys in the index
:param partition_key_list: the list of partition keys in the index
:type partition_key_list: list[str]
:param stream_names: the list of full paths to the streams that contain
this index in the DataLake account.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,11 +25,9 @@ class USqlTableColumn(Model):
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'column_name': {'key': 'columnName', 'type': 'str'},
'data_type': {'key': 'dataType', 'type': 'str'},
}

def __init__(self, name=None, type=None, column_name=None, data_type=None):
super(USqlTableColumn, self).__init__()
self.name = name if name != None else column_name
self.type = type if type != None else data_type
def __init__(self, **kwargs):
    """Initialize the table column from optional keyword arguments.

    Recognized keys:
      name -- the column name, or None if not supplied.
      type -- the U-SQL data type of the column, or None if not supplied.
    All keyword arguments are also forwarded to the base Model initializer.
    """
    super(USqlTableColumn, self).__init__(**kwargs)
    # dict.get defaults to None already, so no explicit default is needed.
    self.name = kwargs.get('name')
    self.type = kwargs.get('type')
Original file line number Diff line number Diff line change
Expand Up @@ -25,11 +25,9 @@ class USqlTableColumn(Model):
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'column_name': {'key': 'columnName', 'type': 'str'},
'data_type': {'key': 'dataType', 'type': 'str'},
}

def __init__(self, *, name: str=None, type: str=None, **kwargs) -> None:
    """Initialize the table column.

    :param name: the name of the column in the table.
    :param type: the U-SQL data type of the column.

    Remaining keyword arguments are forwarded to the base Model
    initializer. (The legacy ``column_name``/``data_type`` aliases were
    removed in this version of the model; only ``name`` and ``type``
    are stored.)
    """
    super(USqlTableColumn, self).__init__(**kwargs)
    self.name = name
    self.type = type
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@ class USqlTableFragment(Model):
:type row_count: long
:param create_date: the creation time of the table fragment.
:type create_date: datetime
:param stream_path: the relative path for the table fragment location.
:type stream_path: str
"""

_attribute_map = {
Expand All @@ -38,6 +40,7 @@ class USqlTableFragment(Model):
'size': {'key': 'size', 'type': 'long'},
'row_count': {'key': 'rowCount', 'type': 'long'},
'create_date': {'key': 'createDate', 'type': 'iso-8601'},
'stream_path': {'key': 'streamPath', 'type': 'str'},
}

def __init__(self, **kwargs):
Expand All @@ -48,3 +51,4 @@ def __init__(self, **kwargs):
self.size = kwargs.get('size', None)
self.row_count = kwargs.get('row_count', None)
self.create_date = kwargs.get('create_date', None)
self.stream_path = kwargs.get('stream_path', None)
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@ class USqlTableFragment(Model):
:type row_count: long
:param create_date: the creation time of the table fragment.
:type create_date: datetime
:param stream_path: the relative path for the table fragment location.
:type stream_path: str
"""

_attribute_map = {
Expand All @@ -38,13 +40,15 @@ class USqlTableFragment(Model):
'size': {'key': 'size', 'type': 'long'},
'row_count': {'key': 'rowCount', 'type': 'long'},
'create_date': {'key': 'createDate', 'type': 'iso-8601'},
'stream_path': {'key': 'streamPath', 'type': 'str'},
}

def __init__(self, *, parent_id: str=None, fragment_id: str=None, index_id: int=None, size: int=None, row_count: int=None, create_date=None, stream_path: str=None, **kwargs) -> None:
    """Initialize the table fragment.

    :param parent_id: identifier of the parent object of the fragment
        (presumably the owning table — confirm against the service docs).
    :param fragment_id: identifier of this table fragment.
    :param index_id: identifier of the index associated with the fragment.
    :param size: size of the table fragment (long).
    :param row_count: the number of rows in the table fragment (long).
    :param create_date: the creation time of the table fragment (datetime).
    :param stream_path: the relative path for the table fragment location.

    Remaining keyword arguments are forwarded to the base Model
    initializer.
    """
    super(USqlTableFragment, self).__init__(**kwargs)
    self.parent_id = parent_id
    self.fragment_id = fragment_id
    self.index_id = index_id
    self.size = size
    self.row_count = row_count
    self.create_date = create_date
    self.stream_path = stream_path
Loading