[AutoPR datafactory/resource-manager] A series of Public swagger update (modification) #5510

Closed
150 changes: 128 additions & 22 deletions azure-mgmt-datafactory/azure/mgmt/datafactory/models/__init__.py

Large diffs are not rendered by default.

@@ -0,0 +1,55 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .dataset_location import DatasetLocation


class AmazonS3Location(DatasetLocation):
    """The location of an Amazon S3 dataset.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection
    :type additional_properties: dict[str, object]
    :param type: Required. Type of dataset storage location.
    :type type: str
    :param folder_path: Specify the folder path of the dataset. Type: string
     (or Expression with resultType string).
    :type folder_path: object
    :param file_name: Specify the file name of the dataset. Type: string (or
     Expression with resultType string).
    :type file_name: object
    :param bucket_name: Specify the bucketName of Amazon S3. Type: string (or
     Expression with resultType string).
    :type bucket_name: object
    :param version: Specify the version of Amazon S3. Type: string (or
     Expression with resultType string).
    :type version: object
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'type': {'key': 'type', 'type': 'str'},
        'folder_path': {'key': 'folderPath', 'type': 'object'},
        'file_name': {'key': 'fileName', 'type': 'object'},
        'bucket_name': {'key': 'bucketName', 'type': 'object'},
        'version': {'key': 'version', 'type': 'object'},
    }

    def __init__(self, **kwargs):
        super(AmazonS3Location, self).__init__(**kwargs)
        self.bucket_name = kwargs.get('bucket_name', None)
        self.version = kwargs.get('version', None)
@@ -0,0 +1,55 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .dataset_location_py3 import DatasetLocation


class AmazonS3Location(DatasetLocation):
    """The location of an Amazon S3 dataset.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection
    :type additional_properties: dict[str, object]
    :param type: Required. Type of dataset storage location.
    :type type: str
    :param folder_path: Specify the folder path of the dataset. Type: string
     (or Expression with resultType string).
    :type folder_path: object
    :param file_name: Specify the file name of the dataset. Type: string (or
     Expression with resultType string).
    :type file_name: object
    :param bucket_name: Specify the bucketName of Amazon S3. Type: string (or
     Expression with resultType string).
    :type bucket_name: object
    :param version: Specify the version of Amazon S3. Type: string (or
     Expression with resultType string).
    :type version: object
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'type': {'key': 'type', 'type': 'str'},
        'folder_path': {'key': 'folderPath', 'type': 'object'},
        'file_name': {'key': 'fileName', 'type': 'object'},
        'bucket_name': {'key': 'bucketName', 'type': 'object'},
        'version': {'key': 'version', 'type': 'object'},
    }

    def __init__(self, *, type: str, additional_properties=None, folder_path=None, file_name=None, bucket_name=None, version=None, **kwargs) -> None:
        super(AmazonS3Location, self).__init__(additional_properties=additional_properties, type=type, folder_path=folder_path, file_name=file_name, **kwargs)
        self.bucket_name = bucket_name
        self.version = version
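
For context, a minimal usage sketch (not part of this diff) of the new AmazonS3Location model. It assumes the class is exported from azure.mgmt.datafactory.models, as the __init__.py change above suggests, and that 'AmazonS3Location' is the type discriminator string; the bucket and path values are hypothetical placeholders.

from azure.mgmt.datafactory.models import AmazonS3Location

# Point a dataset at an S3 bucket/folder; each value may also be an
# ADF Expression with resultType string.
location = AmazonS3Location(
    type='AmazonS3Location',           # required by _validation; assumed discriminator value
    bucket_name='my-example-bucket',   # hypothetical bucket name
    folder_path='raw/events',          # hypothetical folder path
    file_name='events.csv',
)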
@@ -0,0 +1,78 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .connector_read_setting import ConnectorReadSetting


class AmazonS3ReadSetting(ConnectorReadSetting):
    """Amazon S3 read settings.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection
    :type additional_properties: dict[str, object]
    :param type: Required. The read setting type.
    :type type: str
    :param max_concurrent_connections: The maximum concurrent connection count
     for the source data store. Type: integer (or Expression with resultType
     integer).
    :type max_concurrent_connections: object
    :param recursive: If true, files under the folder path will be read
     recursively. Default is true. Type: boolean (or Expression with resultType
     boolean).
    :type recursive: object
    :param wildcard_folder_path: AmazonS3 wildcardFolderPath. Type: string (or
     Expression with resultType string).
    :type wildcard_folder_path: object
    :param wildcard_file_name: AmazonS3 wildcardFileName. Type: string (or
     Expression with resultType string).
    :type wildcard_file_name: object
    :param prefix: The prefix filter for the S3 object name. Type: string (or
     Expression with resultType string).
    :type prefix: object
    :param enable_partition_discovery: Indicates whether to enable partition
     discovery.
    :type enable_partition_discovery: bool
    :param modified_datetime_start: The start of file's modified datetime.
     Type: string (or Expression with resultType string).
    :type modified_datetime_start: object
    :param modified_datetime_end: The end of file's modified datetime. Type:
     string (or Expression with resultType string).
    :type modified_datetime_end: object
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'type': {'key': 'type', 'type': 'str'},
        'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'},
        'recursive': {'key': 'recursive', 'type': 'object'},
        'wildcard_folder_path': {'key': 'wildcardFolderPath', 'type': 'object'},
        'wildcard_file_name': {'key': 'wildcardFileName', 'type': 'object'},
        'prefix': {'key': 'prefix', 'type': 'object'},
        'enable_partition_discovery': {'key': 'enablePartitionDiscovery', 'type': 'bool'},
        'modified_datetime_start': {'key': 'modifiedDatetimeStart', 'type': 'object'},
        'modified_datetime_end': {'key': 'modifiedDatetimeEnd', 'type': 'object'},
    }

    def __init__(self, **kwargs):
        super(AmazonS3ReadSetting, self).__init__(**kwargs)
        self.recursive = kwargs.get('recursive', None)
        self.wildcard_folder_path = kwargs.get('wildcard_folder_path', None)
        self.wildcard_file_name = kwargs.get('wildcard_file_name', None)
        self.prefix = kwargs.get('prefix', None)
        self.enable_partition_discovery = kwargs.get('enable_partition_discovery', None)
        self.modified_datetime_start = kwargs.get('modified_datetime_start', None)
        self.modified_datetime_end = kwargs.get('modified_datetime_end', None)
@@ -0,0 +1,78 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .connector_read_setting_py3 import ConnectorReadSetting


class AmazonS3ReadSetting(ConnectorReadSetting):
    """Amazon S3 read settings.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection
    :type additional_properties: dict[str, object]
    :param type: Required. The read setting type.
    :type type: str
    :param max_concurrent_connections: The maximum concurrent connection count
     for the source data store. Type: integer (or Expression with resultType
     integer).
    :type max_concurrent_connections: object
    :param recursive: If true, files under the folder path will be read
     recursively. Default is true. Type: boolean (or Expression with resultType
     boolean).
    :type recursive: object
    :param wildcard_folder_path: AmazonS3 wildcardFolderPath. Type: string (or
     Expression with resultType string).
    :type wildcard_folder_path: object
    :param wildcard_file_name: AmazonS3 wildcardFileName. Type: string (or
     Expression with resultType string).
    :type wildcard_file_name: object
    :param prefix: The prefix filter for the S3 object name. Type: string (or
     Expression with resultType string).
    :type prefix: object
    :param enable_partition_discovery: Indicates whether to enable partition
     discovery.
    :type enable_partition_discovery: bool
    :param modified_datetime_start: The start of file's modified datetime.
     Type: string (or Expression with resultType string).
    :type modified_datetime_start: object
    :param modified_datetime_end: The end of file's modified datetime. Type:
     string (or Expression with resultType string).
    :type modified_datetime_end: object
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'type': {'key': 'type', 'type': 'str'},
        'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'},
        'recursive': {'key': 'recursive', 'type': 'object'},
        'wildcard_folder_path': {'key': 'wildcardFolderPath', 'type': 'object'},
        'wildcard_file_name': {'key': 'wildcardFileName', 'type': 'object'},
        'prefix': {'key': 'prefix', 'type': 'object'},
        'enable_partition_discovery': {'key': 'enablePartitionDiscovery', 'type': 'bool'},
        'modified_datetime_start': {'key': 'modifiedDatetimeStart', 'type': 'object'},
        'modified_datetime_end': {'key': 'modifiedDatetimeEnd', 'type': 'object'},
    }

    def __init__(self, *, type: str, additional_properties=None, max_concurrent_connections=None, recursive=None, wildcard_folder_path=None, wildcard_file_name=None, prefix=None, enable_partition_discovery: bool=None, modified_datetime_start=None, modified_datetime_end=None, **kwargs) -> None:
        super(AmazonS3ReadSetting, self).__init__(additional_properties=additional_properties, type=type, max_concurrent_connections=max_concurrent_connections, **kwargs)
        self.recursive = recursive
        self.wildcard_folder_path = wildcard_folder_path
        self.wildcard_file_name = wildcard_file_name
        self.prefix = prefix
        self.enable_partition_discovery = enable_partition_discovery
        self.modified_datetime_start = modified_datetime_start
        self.modified_datetime_end = modified_datetime_end
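
Similarly, a hedged sketch (not part of this diff) of how AmazonS3ReadSetting might be populated for a wildcard read with a modified-datetime window; the import path follows the same assumption as above, and the type string and all values are illustrative only.

from azure.mgmt.datafactory.models import AmazonS3ReadSetting

read_settings = AmazonS3ReadSetting(
    type='AmazonS3ReadSetting',              # assumed discriminator value
    recursive=True,                          # read nested folders
    wildcard_folder_path='raw/2019/*',       # hypothetical wildcard folder
    wildcard_file_name='*.csv',
    modified_datetime_start='2019-04-01T00:00:00Z',
    modified_datetime_end='2019-05-01T00:00:00Z',
)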
@@ -0,0 +1,50 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .dataset_location import DatasetLocation


class AzureBlobFSLocation(DatasetLocation):
    """The location of an Azure BlobFS dataset.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection
    :type additional_properties: dict[str, object]
    :param type: Required. Type of dataset storage location.
    :type type: str
    :param folder_path: Specify the folder path of the dataset. Type: string
     (or Expression with resultType string).
    :type folder_path: object
    :param file_name: Specify the file name of the dataset. Type: string (or
     Expression with resultType string).
    :type file_name: object
    :param file_system: Specify the fileSystem of Azure BlobFS. Type: string
     (or Expression with resultType string).
    :type file_system: object
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'type': {'key': 'type', 'type': 'str'},
        'folder_path': {'key': 'folderPath', 'type': 'object'},
        'file_name': {'key': 'fileName', 'type': 'object'},
        'file_system': {'key': 'fileSystem', 'type': 'object'},
    }

    def __init__(self, **kwargs):
        super(AzureBlobFSLocation, self).__init__(**kwargs)
        self.file_system = kwargs.get('file_system', None)
@@ -0,0 +1,50 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .dataset_location_py3 import DatasetLocation


class AzureBlobFSLocation(DatasetLocation):
    """The location of an Azure BlobFS dataset.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection
    :type additional_properties: dict[str, object]
    :param type: Required. Type of dataset storage location.
    :type type: str
    :param folder_path: Specify the folder path of the dataset. Type: string
     (or Expression with resultType string).
    :type folder_path: object
    :param file_name: Specify the file name of the dataset. Type: string (or
     Expression with resultType string).
    :type file_name: object
    :param file_system: Specify the fileSystem of Azure BlobFS. Type: string
     (or Expression with resultType string).
    :type file_system: object
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'type': {'key': 'type', 'type': 'str'},
        'folder_path': {'key': 'folderPath', 'type': 'object'},
        'file_name': {'key': 'fileName', 'type': 'object'},
        'file_system': {'key': 'fileSystem', 'type': 'object'},
    }

    def __init__(self, *, type: str, additional_properties=None, folder_path=None, file_name=None, file_system=None, **kwargs) -> None:
        super(AzureBlobFSLocation, self).__init__(additional_properties=additional_properties, type=type, folder_path=folder_path, file_name=file_name, **kwargs)
        self.file_system = file_system
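
Finally, a hedged sketch (not part of this diff) for AzureBlobFSLocation, pointing a dataset at an ADLS Gen2 file system; the import path follows the same assumption as above and the names are placeholders.

from azure.mgmt.datafactory.models import AzureBlobFSLocation

location = AzureBlobFSLocation(
    type='AzureBlobFSLocation',     # assumed discriminator value
    file_system='myfilesystem',     # hypothetical ADLS Gen2 file system
    folder_path='curated/sales',
    file_name='report.parquet',
)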