From 6ba4bf75dfdce2b9a43a55e7ebea249588869c4a Mon Sep 17 00:00:00 2001 From: Azure SDK for Python bot Date: Wed, 14 Aug 2019 20:23:56 +0000 Subject: [PATCH] Generated from d22072afd73683450b42a2d626e10013330ab31b event triggers subcription apis --- .../azure/mgmt/datafactory/models/__init__.py | 5 + .../_data_factory_management_client_enums.py | 9 + .../azure/mgmt/datafactory/models/_models.py | 191 +++++++++++ .../mgmt/datafactory/models/_models_py3.py | 317 ++++++++++++++---- .../operations/_triggers_operations.py | 262 +++++++++++++++ 5 files changed, 721 insertions(+), 63 deletions(-) diff --git a/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/__init__.py b/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/__init__.py index aae612d71bb6..b76d84d11b17 100644 --- a/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/__init__.py +++ b/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/__init__.py @@ -491,6 +491,7 @@ from ._models_py3 import TriggerResource from ._models_py3 import TriggerRun from ._models_py3 import TriggerRunsQueryResponse + from ._models_py3 import TriggerSubscriptionOperationStatus from ._models_py3 import TumblingWindowTrigger from ._models_py3 import TumblingWindowTriggerDependencyReference from ._models_py3 import UntilActivity @@ -1002,6 +1003,7 @@ from ._models import TriggerResource from ._models import TriggerRun from ._models import TriggerRunsQueryResponse + from ._models import TriggerSubscriptionOperationStatus from ._models import TumblingWindowTrigger from ._models import TumblingWindowTriggerDependencyReference from ._models import UntilActivity @@ -1046,6 +1048,7 @@ DependencyCondition, VariableType, TriggerRuntimeState, + EventSubscriptionStatus, RunQueryFilterOperand, RunQueryFilterOperator, RunQueryOrderByField, @@ -1594,6 +1597,7 @@ 'TriggerResource', 'TriggerRun', 'TriggerRunsQueryResponse', + 'TriggerSubscriptionOperationStatus', 
'TumblingWindowTrigger', 'TumblingWindowTriggerDependencyReference', 'UntilActivity', @@ -1637,6 +1641,7 @@ 'DependencyCondition', 'VariableType', 'TriggerRuntimeState', + 'EventSubscriptionStatus', 'RunQueryFilterOperand', 'RunQueryFilterOperator', 'RunQueryOrderByField', diff --git a/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/_data_factory_management_client_enums.py b/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/_data_factory_management_client_enums.py index 45448073f831..eda785276fdb 100644 --- a/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/_data_factory_management_client_enums.py +++ b/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/_data_factory_management_client_enums.py @@ -65,6 +65,15 @@ class TriggerRuntimeState(str, Enum): disabled = "Disabled" +class EventSubscriptionStatus(str, Enum): + + enabled = "Enabled" + provisioning = "Provisioning" + deprovisioning = "Deprovisioning" + disabled = "Disabled" + unknown = "Unknown" + + class RunQueryFilterOperand(str, Enum): pipeline_name = "PipelineName" diff --git a/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/_models.py b/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/_models.py index bbb7b343f03d..b9f2574d0dc2 100644 --- a/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/_models.py +++ b/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/_models.py @@ -1565,6 +1565,10 @@ class CopySink(Model): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. 
:type type: str """ @@ -1580,6 +1584,7 @@ class CopySink(Model): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, } @@ -1595,6 +1600,7 @@ def __init__(self, **kwargs): self.sink_retry_count = kwargs.get('sink_retry_count', None) self.sink_retry_wait = kwargs.get('sink_retry_wait', None) self.max_concurrent_connections = kwargs.get('max_concurrent_connections', None) + self.table_option = kwargs.get('table_option', None) self.type = None @@ -1624,6 +1630,10 @@ class AvroSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param store_settings: Avro store settings. @@ -1643,6 +1653,7 @@ class AvroSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'store_settings': {'key': 'storeSettings', 'type': 'StoreWriteSettings'}, 'format_settings': {'key': 'formatSettings', 'type': 'AvroWriteSettings'}, @@ -2196,6 +2207,10 @@ class AzureBlobFSSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. 
+ For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param copy_behavior: The type of copy behavior for copy sink. @@ -2213,6 +2228,7 @@ class AzureBlobFSSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'copy_behavior': {'key': 'copyBehavior', 'type': 'object'}, } @@ -2909,6 +2925,10 @@ class AzureDataExplorerSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param ingestion_mapping_name: A name of a pre-created csv mapping that @@ -2933,6 +2953,7 @@ class AzureDataExplorerSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'ingestion_mapping_name': {'key': 'ingestionMappingName', 'type': 'object'}, 'ingestion_mapping_as_json': {'key': 'ingestionMappingAsJson', 'type': 'object'}, @@ -3432,6 +3453,10 @@ class AzureDataLakeStoreSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). 
:type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param copy_behavior: The type of copy behavior for copy sink. @@ -3451,6 +3476,7 @@ class AzureDataLakeStoreSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'copy_behavior': {'key': 'copyBehavior', 'type': 'object'}, 'enable_adls_single_file_parallel': {'key': 'enableAdlsSingleFileParallel', 'type': 'object'}, @@ -4287,6 +4313,10 @@ class AzureMySqlSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param pre_copy_script: A query to execute before starting the copy. Type: @@ -4305,6 +4335,7 @@ class AzureMySqlSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'pre_copy_script': {'key': 'preCopyScript', 'type': 'object'}, } @@ -4504,6 +4535,10 @@ class AzurePostgreSqlSink(CopySink): for the sink data store. 
Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param pre_copy_script: A query to execute before starting the copy. Type: @@ -4522,6 +4557,7 @@ class AzurePostgreSqlSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'pre_copy_script': {'key': 'preCopyScript', 'type': 'object'}, } @@ -4675,6 +4711,10 @@ class AzureQueueSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str """ @@ -4690,6 +4730,7 @@ class AzureQueueSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, } @@ -4785,6 +4826,10 @@ class AzureSearchIndexSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. 
+ For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param write_behavior: Specify the write behavior when upserting documents @@ -4804,6 +4849,7 @@ class AzureSearchIndexSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'write_behavior': {'key': 'writeBehavior', 'type': 'str'}, } @@ -5263,6 +5309,10 @@ class AzureSqlSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param sql_writer_stored_procedure_name: SQL writer stored procedure name. @@ -5294,6 +5344,7 @@ class AzureSqlSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'sql_writer_stored_procedure_name': {'key': 'sqlWriterStoredProcedureName', 'type': 'object'}, 'sql_writer_table_type': {'key': 'sqlWriterTableType', 'type': 'object'}, @@ -5601,6 +5652,10 @@ class AzureTableSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). 
:type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param azure_table_default_partition_key_value: Azure Table default @@ -5628,6 +5683,7 @@ class AzureTableSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'azure_table_default_partition_key_value': {'key': 'azureTableDefaultPartitionKeyValue', 'type': 'object'}, 'azure_table_partition_key_name': {'key': 'azureTablePartitionKeyName', 'type': 'object'}, @@ -5857,6 +5913,10 @@ class BinarySink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param store_settings: Binary store settings. @@ -5874,6 +5934,7 @@ class BinarySink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'store_settings': {'key': 'storeSettings', 'type': 'StoreWriteSettings'}, } @@ -6139,6 +6200,10 @@ class BlobSink(CopySink): for the sink data store. 
Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param blob_writer_overwrite_files: Blob writer overwrite files. Type: @@ -6165,6 +6230,7 @@ class BlobSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'blob_writer_overwrite_files': {'key': 'blobWriterOverwriteFiles', 'type': 'object'}, 'blob_writer_date_time_format': {'key': 'blobWriterDateTimeFormat', 'type': 'object'}, @@ -6738,6 +6804,10 @@ class CommonDataServiceForAppsSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :ivar write_behavior: Required. The write behavior for the operation. 
@@ -6761,6 +6831,7 @@ class CommonDataServiceForAppsSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'write_behavior': {'key': 'writeBehavior', 'type': 'str'}, 'ignore_null_values': {'key': 'ignoreNullValues', 'type': 'object'}, @@ -7318,6 +7389,10 @@ class CosmosDbMongoDbApiSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param write_behavior: Specifies whether the document with same key to be @@ -7338,6 +7413,7 @@ class CosmosDbMongoDbApiSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'write_behavior': {'key': 'writeBehavior', 'type': 'object'}, } @@ -8845,6 +8921,10 @@ class DelimitedTextSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param store_settings: DelimitedText store settings. 
@@ -8865,6 +8945,7 @@ class DelimitedTextSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'store_settings': {'key': 'storeSettings', 'type': 'StoreWriteSettings'}, 'format_settings': {'key': 'formatSettings', 'type': 'DelimitedTextWriteSettings'}, @@ -9117,6 +9198,10 @@ class DocumentDbCollectionSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param nesting_separator: Nested properties separator. Default is . (dot). @@ -9138,6 +9223,7 @@ class DocumentDbCollectionSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'nesting_separator': {'key': 'nestingSeparator', 'type': 'object'}, 'write_behavior': {'key': 'writeBehavior', 'type': 'object'}, @@ -9748,6 +9834,10 @@ class DynamicsCrmSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. 
Constant filled by server. :type type: str :ivar write_behavior: Required. The write behavior for the operation. @@ -9771,6 +9861,7 @@ class DynamicsCrmSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'write_behavior': {'key': 'writeBehavior', 'type': 'str'}, 'ignore_null_values': {'key': 'ignoreNullValues', 'type': 'object'}, @@ -10016,6 +10107,10 @@ class DynamicsSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :ivar write_behavior: Required. The write behavior for the operation. @@ -10039,6 +10134,7 @@ class DynamicsSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'write_behavior': {'key': 'writeBehavior', 'type': 'str'}, 'ignore_null_values': {'key': 'ignoreNullValues', 'type': 'object'}, @@ -11194,6 +11290,10 @@ class FileSystemSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). 
+ :type table_option: object :param type: Required. Constant filled by server. :type type: str :param copy_behavior: The type of copy behavior for copy sink. @@ -11211,6 +11311,7 @@ class FileSystemSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'copy_behavior': {'key': 'copyBehavior', 'type': 'object'}, } @@ -14590,6 +14691,10 @@ class InformixSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param pre_copy_script: A query to execute before starting the copy. Type: @@ -14608,6 +14713,7 @@ class InformixSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'pre_copy_script': {'key': 'preCopyScript', 'type': 'object'}, } @@ -16833,6 +16939,10 @@ class MicrosoftAccessSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. 
:type type: str :param pre_copy_script: A query to execute before starting the copy. Type: @@ -16851,6 +16961,7 @@ class MicrosoftAccessSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'pre_copy_script': {'key': 'preCopyScript', 'type': 'object'}, } @@ -18082,6 +18193,10 @@ class OdbcSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param pre_copy_script: A query to execute before starting the copy. Type: @@ -18100,6 +18215,7 @@ class OdbcSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'pre_copy_script': {'key': 'preCopyScript', 'type': 'object'}, } @@ -18933,6 +19049,10 @@ class OracleSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param pre_copy_script: SQL pre-copy script. 
Type: string (or Expression @@ -18951,6 +19071,7 @@ class OracleSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'pre_copy_script': {'key': 'preCopyScript', 'type': 'object'}, } @@ -19280,6 +19401,10 @@ class ParquetSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param store_settings: Parquet store settings. @@ -19297,6 +19422,7 @@ class ParquetSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'store_settings': {'key': 'storeSettings', 'type': 'StoreWriteSettings'}, } @@ -22112,6 +22238,10 @@ class SalesforceServiceCloudSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param write_behavior: The write behavior for the operation. 
Default is @@ -22144,6 +22274,7 @@ class SalesforceServiceCloudSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'write_behavior': {'key': 'writeBehavior', 'type': 'str'}, 'external_id_field_name': {'key': 'externalIdFieldName', 'type': 'object'}, @@ -22235,6 +22366,10 @@ class SalesforceSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param write_behavior: The write behavior for the operation. Default is @@ -22267,6 +22402,7 @@ class SalesforceSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'write_behavior': {'key': 'writeBehavior', 'type': 'str'}, 'external_id_field_name': {'key': 'externalIdFieldName', 'type': 'object'}, @@ -22659,6 +22795,10 @@ class SapCloudForCustomerSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. 
:type type: str :param write_behavior: The write behavior for the operation. Default is @@ -22678,6 +22818,7 @@ class SapCloudForCustomerSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'write_behavior': {'key': 'writeBehavior', 'type': 'str'}, } @@ -24988,6 +25129,10 @@ class SqlDWSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param pre_copy_script: SQL pre-copy script. Type: string (or Expression @@ -25013,6 +25158,7 @@ class SqlDWSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'pre_copy_script': {'key': 'preCopyScript', 'type': 'object'}, 'allow_poly_base': {'key': 'allowPolyBase', 'type': 'object'}, @@ -25111,6 +25257,10 @@ class SqlMISink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. 
:type type: str :param sql_writer_stored_procedure_name: SQL writer stored procedure name. @@ -25142,6 +25292,7 @@ class SqlMISink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'sql_writer_stored_procedure_name': {'key': 'sqlWriterStoredProcedureName', 'type': 'object'}, 'sql_writer_table_type': {'key': 'sqlWriterTableType', 'type': 'object'}, @@ -25309,6 +25460,10 @@ class SqlServerSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param sql_writer_stored_procedure_name: SQL writer stored procedure name. @@ -25340,6 +25495,7 @@ class SqlServerSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'sql_writer_stored_procedure_name': {'key': 'sqlWriterStoredProcedureName', 'type': 'object'}, 'sql_writer_table_type': {'key': 'sqlWriterTableType', 'type': 'object'}, @@ -25573,6 +25729,10 @@ class SqlSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. 
class TriggerSubscriptionOperationStatus(Model):
    """Response body for a trigger event-subscription operation.

    All variables are server-populated (read-only): values supplied by the
    caller are ignored when a request is serialized.

    :ivar trigger_name: Trigger name.
    :vartype trigger_name: str
    :ivar status: Event Subscription Status. Possible values include:
     'Enabled', 'Provisioning', 'Deprovisioning', 'Disabled', 'Unknown'
    :vartype status: str or
     ~azure.mgmt.datafactory.models.EventSubscriptionStatus
    """

    # Both attributes are read-only: the service reports them, clients never set them.
    _validation = {
        'trigger_name': {'readonly': True},
        'status': {'readonly': True},
    }

    # Maps Python attribute names to the wire (JSON) keys used by msrest.
    _attribute_map = {
        'trigger_name': {'key': 'triggerName', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(TriggerSubscriptionOperationStatus, self).__init__(**kwargs)
        # Server-populated; initialized to None until deserialization fills them in.
        self.trigger_name = None
        self.status = None
:type type: str """ @@ -1580,6 +1584,7 @@ class CopySink(Model): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, } @@ -1587,7 +1592,7 @@ class CopySink(Model): 'type': {'CosmosDbMongoDbApiSink': 'CosmosDbMongoDbApiSink', 'SalesforceServiceCloudSink': 'SalesforceServiceCloudSink', 'SalesforceSink': 'SalesforceSink', 'AzureDataExplorerSink': 'AzureDataExplorerSink', 'CommonDataServiceForAppsSink': 'CommonDataServiceForAppsSink', 'DynamicsCrmSink': 'DynamicsCrmSink', 'DynamicsSink': 'DynamicsSink', 'MicrosoftAccessSink': 'MicrosoftAccessSink', 'InformixSink': 'InformixSink', 'OdbcSink': 'OdbcSink', 'AzureSearchIndexSink': 'AzureSearchIndexSink', 'AzureBlobFSSink': 'AzureBlobFSSink', 'AzureDataLakeStoreSink': 'AzureDataLakeStoreSink', 'OracleSink': 'OracleSink', 'SqlDWSink': 'SqlDWSink', 'SqlMISink': 'SqlMISink', 'AzureSqlSink': 'AzureSqlSink', 'SqlServerSink': 'SqlServerSink', 'SqlSink': 'SqlSink', 'DocumentDbCollectionSink': 'DocumentDbCollectionSink', 'FileSystemSink': 'FileSystemSink', 'BlobSink': 'BlobSink', 'BinarySink': 'BinarySink', 'ParquetSink': 'ParquetSink', 'AvroSink': 'AvroSink', 'AzureTableSink': 'AzureTableSink', 'AzureQueueSink': 'AzureQueueSink', 'SapCloudForCustomerSink': 'SapCloudForCustomerSink', 'AzureMySqlSink': 'AzureMySqlSink', 'AzurePostgreSqlSink': 'AzurePostgreSqlSink', 'DelimitedTextSink': 'DelimitedTextSink'} } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, **kwargs) -> None: + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, 
max_concurrent_connections=None, table_option=None, **kwargs) -> None: super(CopySink, self).__init__(**kwargs) self.additional_properties = additional_properties self.write_batch_size = write_batch_size @@ -1595,6 +1600,7 @@ def __init__(self, *, additional_properties=None, write_batch_size=None, write_b self.sink_retry_count = sink_retry_count self.sink_retry_wait = sink_retry_wait self.max_concurrent_connections = max_concurrent_connections + self.table_option = table_option self.type = None @@ -1624,6 +1630,10 @@ class AvroSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param store_settings: Avro store settings. @@ -1643,13 +1653,14 @@ class AvroSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'store_settings': {'key': 'storeSettings', 'type': 'StoreWriteSettings'}, 'format_settings': {'key': 'formatSettings', 'type': 'AvroWriteSettings'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, store_settings=None, format_settings=None, **kwargs) -> None: - super(AvroSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, 
max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, store_settings=None, format_settings=None, **kwargs) -> None: + super(AvroSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.store_settings = store_settings self.format_settings = format_settings self.type = 'AvroSink' @@ -2196,6 +2207,10 @@ class AzureBlobFSSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param copy_behavior: The type of copy behavior for copy sink. 
@@ -2213,12 +2228,13 @@ class AzureBlobFSSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'copy_behavior': {'key': 'copyBehavior', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, copy_behavior=None, **kwargs) -> None: - super(AzureBlobFSSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, copy_behavior=None, **kwargs) -> None: + super(AzureBlobFSSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.copy_behavior = copy_behavior self.type = 'AzureBlobFSSink' @@ -2909,6 +2925,10 @@ class AzureDataExplorerSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. 
Constant filled by server. :type type: str :param ingestion_mapping_name: A name of a pre-created csv mapping that @@ -2933,14 +2953,15 @@ class AzureDataExplorerSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'ingestion_mapping_name': {'key': 'ingestionMappingName', 'type': 'object'}, 'ingestion_mapping_as_json': {'key': 'ingestionMappingAsJson', 'type': 'object'}, 'flush_immediately': {'key': 'flushImmediately', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, ingestion_mapping_name=None, ingestion_mapping_as_json=None, flush_immediately=None, **kwargs) -> None: - super(AzureDataExplorerSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, ingestion_mapping_name=None, ingestion_mapping_as_json=None, flush_immediately=None, **kwargs) -> None: + super(AzureDataExplorerSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.ingestion_mapping_name = ingestion_mapping_name 
self.ingestion_mapping_as_json = ingestion_mapping_as_json self.flush_immediately = flush_immediately @@ -3432,6 +3453,10 @@ class AzureDataLakeStoreSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param copy_behavior: The type of copy behavior for copy sink. @@ -3451,13 +3476,14 @@ class AzureDataLakeStoreSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'copy_behavior': {'key': 'copyBehavior', 'type': 'object'}, 'enable_adls_single_file_parallel': {'key': 'enableAdlsSingleFileParallel', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, copy_behavior=None, enable_adls_single_file_parallel=None, **kwargs) -> None: - super(AzureDataLakeStoreSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, copy_behavior=None, enable_adls_single_file_parallel=None, **kwargs) -> None: + 
super(AzureDataLakeStoreSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.copy_behavior = copy_behavior self.enable_adls_single_file_parallel = enable_adls_single_file_parallel self.type = 'AzureDataLakeStoreSink' @@ -4287,6 +4313,10 @@ class AzureMySqlSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param pre_copy_script: A query to execute before starting the copy. Type: @@ -4305,12 +4335,13 @@ class AzureMySqlSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'pre_copy_script': {'key': 'preCopyScript', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, pre_copy_script=None, **kwargs) -> None: - super(AzureMySqlSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, 
write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, pre_copy_script=None, **kwargs) -> None: + super(AzureMySqlSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.pre_copy_script = pre_copy_script self.type = 'AzureMySqlSink' @@ -4504,6 +4535,10 @@ class AzurePostgreSqlSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param pre_copy_script: A query to execute before starting the copy. 
Type: @@ -4522,12 +4557,13 @@ class AzurePostgreSqlSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'pre_copy_script': {'key': 'preCopyScript', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, pre_copy_script=None, **kwargs) -> None: - super(AzurePostgreSqlSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, pre_copy_script=None, **kwargs) -> None: + super(AzurePostgreSqlSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.pre_copy_script = pre_copy_script self.type = 'AzurePostgreSqlSink' @@ -4675,6 +4711,10 @@ class AzureQueueSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). 
+ :type table_option: object :param type: Required. Constant filled by server. :type type: str """ @@ -4690,11 +4730,12 @@ class AzureQueueSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, **kwargs) -> None: - super(AzureQueueSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, **kwargs) -> None: + super(AzureQueueSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.type = 'AzureQueueSink' @@ -4785,6 +4826,10 @@ class AzureSearchIndexSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. 
:type type: str :param write_behavior: Specify the write behavior when upserting documents @@ -4804,12 +4849,13 @@ class AzureSearchIndexSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'write_behavior': {'key': 'writeBehavior', 'type': 'str'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, write_behavior=None, **kwargs) -> None: - super(AzureSearchIndexSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, write_behavior=None, **kwargs) -> None: + super(AzureSearchIndexSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.write_behavior = write_behavior self.type = 'AzureSearchIndexSink' @@ -5263,6 +5309,10 @@ class AzureSqlSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. 
Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param sql_writer_stored_procedure_name: SQL writer stored procedure name. @@ -5294,6 +5344,7 @@ class AzureSqlSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'sql_writer_stored_procedure_name': {'key': 'sqlWriterStoredProcedureName', 'type': 'object'}, 'sql_writer_table_type': {'key': 'sqlWriterTableType', 'type': 'object'}, @@ -5302,8 +5353,8 @@ class AzureSqlSink(CopySink): 'stored_procedure_table_type_parameter_name': {'key': 'storedProcedureTableTypeParameterName', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, sql_writer_stored_procedure_name=None, sql_writer_table_type=None, pre_copy_script=None, stored_procedure_parameters=None, stored_procedure_table_type_parameter_name=None, **kwargs) -> None: - super(AzureSqlSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, sql_writer_stored_procedure_name=None, sql_writer_table_type=None, pre_copy_script=None, stored_procedure_parameters=None, stored_procedure_table_type_parameter_name=None, **kwargs) -> None: + super(AzureSqlSink, 
self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.sql_writer_stored_procedure_name = sql_writer_stored_procedure_name self.sql_writer_table_type = sql_writer_table_type self.pre_copy_script = pre_copy_script @@ -5601,6 +5652,10 @@ class AzureTableSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param azure_table_default_partition_key_value: Azure Table default @@ -5628,6 +5683,7 @@ class AzureTableSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'azure_table_default_partition_key_value': {'key': 'azureTableDefaultPartitionKeyValue', 'type': 'object'}, 'azure_table_partition_key_name': {'key': 'azureTablePartitionKeyName', 'type': 'object'}, @@ -5635,8 +5691,8 @@ class AzureTableSink(CopySink): 'azure_table_insert_type': {'key': 'azureTableInsertType', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, azure_table_default_partition_key_value=None, azure_table_partition_key_name=None, azure_table_row_key_name=None, azure_table_insert_type=None, 
**kwargs) -> None: - super(AzureTableSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, azure_table_default_partition_key_value=None, azure_table_partition_key_name=None, azure_table_row_key_name=None, azure_table_insert_type=None, **kwargs) -> None: + super(AzureTableSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.azure_table_default_partition_key_value = azure_table_default_partition_key_value self.azure_table_partition_key_name = azure_table_partition_key_name self.azure_table_row_key_name = azure_table_row_key_name @@ -5857,6 +5913,10 @@ class BinarySink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param store_settings: Binary store settings. 
@@ -5874,12 +5934,13 @@ class BinarySink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'store_settings': {'key': 'storeSettings', 'type': 'StoreWriteSettings'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, store_settings=None, **kwargs) -> None: - super(BinarySink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, store_settings=None, **kwargs) -> None: + super(BinarySink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.store_settings = store_settings self.type = 'BinarySink' @@ -6139,6 +6200,10 @@ class BlobSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. 
:type type: str :param blob_writer_overwrite_files: Blob writer overwrite files. Type: @@ -6165,6 +6230,7 @@ class BlobSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'blob_writer_overwrite_files': {'key': 'blobWriterOverwriteFiles', 'type': 'object'}, 'blob_writer_date_time_format': {'key': 'blobWriterDateTimeFormat', 'type': 'object'}, @@ -6172,8 +6238,8 @@ class BlobSink(CopySink): 'copy_behavior': {'key': 'copyBehavior', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, blob_writer_overwrite_files=None, blob_writer_date_time_format=None, blob_writer_add_header=None, copy_behavior=None, **kwargs) -> None: - super(BlobSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, blob_writer_overwrite_files=None, blob_writer_date_time_format=None, blob_writer_add_header=None, copy_behavior=None, **kwargs) -> None: + super(BlobSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) 
self.blob_writer_overwrite_files = blob_writer_overwrite_files self.blob_writer_date_time_format = blob_writer_date_time_format self.blob_writer_add_header = blob_writer_add_header @@ -6738,6 +6804,10 @@ class CommonDataServiceForAppsSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :ivar write_behavior: Required. The write behavior for the operation. @@ -6761,6 +6831,7 @@ class CommonDataServiceForAppsSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'write_behavior': {'key': 'writeBehavior', 'type': 'str'}, 'ignore_null_values': {'key': 'ignoreNullValues', 'type': 'object'}, @@ -6768,8 +6839,8 @@ class CommonDataServiceForAppsSink(CopySink): write_behavior = "Upsert" - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, ignore_null_values=None, **kwargs) -> None: - super(CommonDataServiceForAppsSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, 
max_concurrent_connections=None, table_option=None, ignore_null_values=None, **kwargs) -> None: + super(CommonDataServiceForAppsSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.ignore_null_values = ignore_null_values self.type = 'CommonDataServiceForAppsSink' @@ -7318,6 +7389,10 @@ class CosmosDbMongoDbApiSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param write_behavior: Specifies whether the document with same key to be @@ -7338,12 +7413,13 @@ class CosmosDbMongoDbApiSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'write_behavior': {'key': 'writeBehavior', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, write_behavior=None, **kwargs) -> None: - super(CosmosDbMongoDbApiSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, 
max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, write_behavior=None, **kwargs) -> None: + super(CosmosDbMongoDbApiSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.write_behavior = write_behavior self.type = 'CosmosDbMongoDbApiSink' @@ -8845,6 +8921,10 @@ class DelimitedTextSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param store_settings: DelimitedText store settings. 
@@ -8865,13 +8945,14 @@ class DelimitedTextSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'store_settings': {'key': 'storeSettings', 'type': 'StoreWriteSettings'}, 'format_settings': {'key': 'formatSettings', 'type': 'DelimitedTextWriteSettings'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, store_settings=None, format_settings=None, **kwargs) -> None: - super(DelimitedTextSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, store_settings=None, format_settings=None, **kwargs) -> None: + super(DelimitedTextSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.store_settings = store_settings self.format_settings = format_settings self.type = 'DelimitedTextSink' @@ -9117,6 +9198,10 @@ class DocumentDbCollectionSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). 
:type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param nesting_separator: Nested properties separator. Default is . (dot). @@ -9138,13 +9223,14 @@ class DocumentDbCollectionSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'nesting_separator': {'key': 'nestingSeparator', 'type': 'object'}, 'write_behavior': {'key': 'writeBehavior', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, nesting_separator=None, write_behavior=None, **kwargs) -> None: - super(DocumentDbCollectionSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, nesting_separator=None, write_behavior=None, **kwargs) -> None: + super(DocumentDbCollectionSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, 
max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.nesting_separator = nesting_separator self.write_behavior = write_behavior self.type = 'DocumentDbCollectionSink' @@ -9748,6 +9834,10 @@ class DynamicsCrmSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :ivar write_behavior: Required. The write behavior for the operation. @@ -9771,6 +9861,7 @@ class DynamicsCrmSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'write_behavior': {'key': 'writeBehavior', 'type': 'str'}, 'ignore_null_values': {'key': 'ignoreNullValues', 'type': 'object'}, @@ -9778,8 +9869,8 @@ class DynamicsCrmSink(CopySink): write_behavior = "Upsert" - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, ignore_null_values=None, **kwargs) -> None: - super(DynamicsCrmSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, 
max_concurrent_connections=None, table_option=None, ignore_null_values=None, **kwargs) -> None: + super(DynamicsCrmSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.ignore_null_values = ignore_null_values self.type = 'DynamicsCrmSink' @@ -10016,6 +10107,10 @@ class DynamicsSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :ivar write_behavior: Required. The write behavior for the operation. 
@@ -10039,6 +10134,7 @@ class DynamicsSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'write_behavior': {'key': 'writeBehavior', 'type': 'str'}, 'ignore_null_values': {'key': 'ignoreNullValues', 'type': 'object'}, @@ -10046,8 +10142,8 @@ class DynamicsSink(CopySink): write_behavior = "Upsert" - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, ignore_null_values=None, **kwargs) -> None: - super(DynamicsSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, ignore_null_values=None, **kwargs) -> None: + super(DynamicsSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.ignore_null_values = ignore_null_values self.type = 'DynamicsSink' @@ -11194,6 +11290,10 @@ class FileSystemSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. 
+ For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param copy_behavior: The type of copy behavior for copy sink. @@ -11211,12 +11311,13 @@ class FileSystemSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'copy_behavior': {'key': 'copyBehavior', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, copy_behavior=None, **kwargs) -> None: - super(FileSystemSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, copy_behavior=None, **kwargs) -> None: + super(FileSystemSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.copy_behavior = copy_behavior self.type = 'FileSystemSink' @@ -14590,6 +14691,10 @@ class InformixSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). 
:type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param pre_copy_script: A query to execute before starting the copy. Type: @@ -14608,12 +14713,13 @@ class InformixSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'pre_copy_script': {'key': 'preCopyScript', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, pre_copy_script=None, **kwargs) -> None: - super(InformixSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, pre_copy_script=None, **kwargs) -> None: + super(InformixSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.pre_copy_script = pre_copy_script self.type = 'InformixSink' @@ -16833,6 +16939,10 @@ class 
MicrosoftAccessSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param pre_copy_script: A query to execute before starting the copy. Type: @@ -16851,12 +16961,13 @@ class MicrosoftAccessSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'pre_copy_script': {'key': 'preCopyScript', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, pre_copy_script=None, **kwargs) -> None: - super(MicrosoftAccessSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, pre_copy_script=None, **kwargs) -> None: + super(MicrosoftAccessSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, 
table_option=table_option, **kwargs) self.pre_copy_script = pre_copy_script self.type = 'MicrosoftAccessSink' @@ -18082,6 +18193,10 @@ class OdbcSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param pre_copy_script: A query to execute before starting the copy. Type: @@ -18100,12 +18215,13 @@ class OdbcSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'pre_copy_script': {'key': 'preCopyScript', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, pre_copy_script=None, **kwargs) -> None: - super(OdbcSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, pre_copy_script=None, **kwargs) -> None: + super(OdbcSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, 
sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.pre_copy_script = pre_copy_script self.type = 'OdbcSink' @@ -18933,6 +19049,10 @@ class OracleSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param pre_copy_script: SQL pre-copy script. Type: string (or Expression @@ -18951,12 +19071,13 @@ class OracleSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'pre_copy_script': {'key': 'preCopyScript', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, pre_copy_script=None, **kwargs) -> None: - super(OracleSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, pre_copy_script=None, **kwargs) -> None: + super(OracleSink, self).__init__(additional_properties=additional_properties, 
write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.pre_copy_script = pre_copy_script self.type = 'OracleSink' @@ -19280,6 +19401,10 @@ class ParquetSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param store_settings: Parquet store settings. @@ -19297,12 +19422,13 @@ class ParquetSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'store_settings': {'key': 'storeSettings', 'type': 'StoreWriteSettings'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, store_settings=None, **kwargs) -> None: - super(ParquetSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, store_settings=None, **kwargs) -> None: + 
super(ParquetSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.store_settings = store_settings self.type = 'ParquetSink' @@ -22112,6 +22238,10 @@ class SalesforceServiceCloudSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param write_behavior: The write behavior for the operation. Default is @@ -22144,14 +22274,15 @@ class SalesforceServiceCloudSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'write_behavior': {'key': 'writeBehavior', 'type': 'str'}, 'external_id_field_name': {'key': 'externalIdFieldName', 'type': 'object'}, 'ignore_null_values': {'key': 'ignoreNullValues', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, write_behavior=None, external_id_field_name=None, ignore_null_values=None, **kwargs) -> None: - super(SalesforceServiceCloudSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, 
sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, write_behavior=None, external_id_field_name=None, ignore_null_values=None, **kwargs) -> None: + super(SalesforceServiceCloudSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.write_behavior = write_behavior self.external_id_field_name = external_id_field_name self.ignore_null_values = ignore_null_values @@ -22235,6 +22366,10 @@ class SalesforceSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param write_behavior: The write behavior for the operation. 
Default is @@ -22267,14 +22402,15 @@ class SalesforceSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'write_behavior': {'key': 'writeBehavior', 'type': 'str'}, 'external_id_field_name': {'key': 'externalIdFieldName', 'type': 'object'}, 'ignore_null_values': {'key': 'ignoreNullValues', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, write_behavior=None, external_id_field_name=None, ignore_null_values=None, **kwargs) -> None: - super(SalesforceSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, write_behavior=None, external_id_field_name=None, ignore_null_values=None, **kwargs) -> None: + super(SalesforceSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.write_behavior = write_behavior self.external_id_field_name = external_id_field_name self.ignore_null_values = ignore_null_values @@ -22659,6 +22795,10 @@ class SapCloudForCustomerSink(CopySink): for the sink data store. 
Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param write_behavior: The write behavior for the operation. Default is @@ -22678,12 +22818,13 @@ class SapCloudForCustomerSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'write_behavior': {'key': 'writeBehavior', 'type': 'str'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, write_behavior=None, **kwargs) -> None: - super(SapCloudForCustomerSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, write_behavior=None, **kwargs) -> None: + super(SapCloudForCustomerSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.write_behavior = 
write_behavior self.type = 'SapCloudForCustomerSink' @@ -24988,6 +25129,10 @@ class SqlDWSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param pre_copy_script: SQL pre-copy script. Type: string (or Expression @@ -25013,14 +25158,15 @@ class SqlDWSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'pre_copy_script': {'key': 'preCopyScript', 'type': 'object'}, 'allow_poly_base': {'key': 'allowPolyBase', 'type': 'object'}, 'poly_base_settings': {'key': 'polyBaseSettings', 'type': 'PolybaseSettings'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, pre_copy_script=None, allow_poly_base=None, poly_base_settings=None, **kwargs) -> None: - super(SqlDWSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, pre_copy_script=None, allow_poly_base=None, poly_base_settings=None, **kwargs) -> None: + 
super(SqlDWSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.pre_copy_script = pre_copy_script self.allow_poly_base = allow_poly_base self.poly_base_settings = poly_base_settings @@ -25111,6 +25257,10 @@ class SqlMISink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param sql_writer_stored_procedure_name: SQL writer stored procedure name. @@ -25142,6 +25292,7 @@ class SqlMISink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'sql_writer_stored_procedure_name': {'key': 'sqlWriterStoredProcedureName', 'type': 'object'}, 'sql_writer_table_type': {'key': 'sqlWriterTableType', 'type': 'object'}, @@ -25150,8 +25301,8 @@ class SqlMISink(CopySink): 'stored_procedure_table_type_parameter_name': {'key': 'storedProcedureTableTypeParameterName', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, sql_writer_stored_procedure_name=None, sql_writer_table_type=None, pre_copy_script=None, stored_procedure_parameters=None, 
stored_procedure_table_type_parameter_name=None, **kwargs) -> None: - super(SqlMISink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, sql_writer_stored_procedure_name=None, sql_writer_table_type=None, pre_copy_script=None, stored_procedure_parameters=None, stored_procedure_table_type_parameter_name=None, **kwargs) -> None: + super(SqlMISink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.sql_writer_stored_procedure_name = sql_writer_stored_procedure_name self.sql_writer_table_type = sql_writer_table_type self.pre_copy_script = pre_copy_script @@ -25309,6 +25460,10 @@ class SqlServerSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param sql_writer_stored_procedure_name: SQL writer stored procedure name. 
@@ -25340,6 +25495,7 @@ class SqlServerSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'sql_writer_stored_procedure_name': {'key': 'sqlWriterStoredProcedureName', 'type': 'object'}, 'sql_writer_table_type': {'key': 'sqlWriterTableType', 'type': 'object'}, @@ -25348,8 +25504,8 @@ class SqlServerSink(CopySink): 'stored_procedure_table_type_parameter_name': {'key': 'storedProcedureTableTypeParameterName', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, sql_writer_stored_procedure_name=None, sql_writer_table_type=None, pre_copy_script=None, stored_procedure_parameters=None, stored_procedure_table_type_parameter_name=None, **kwargs) -> None: - super(SqlServerSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, sql_writer_stored_procedure_name=None, sql_writer_table_type=None, pre_copy_script=None, stored_procedure_parameters=None, stored_procedure_table_type_parameter_name=None, **kwargs) -> None: + super(SqlServerSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, 
max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.sql_writer_stored_procedure_name = sql_writer_stored_procedure_name self.sql_writer_table_type = sql_writer_table_type self.pre_copy_script = pre_copy_script @@ -25573,6 +25729,10 @@ class SqlSink(CopySink): for the sink data store. Type: integer (or Expression with resultType integer). :type max_concurrent_connections: object + :param table_option: The option to handle sink table, such as autoCreate. + For now only 'autoCreate' value is supported. Type: string (or Expression + with resultType string). + :type table_option: object :param type: Required. Constant filled by server. :type type: str :param sql_writer_stored_procedure_name: SQL writer stored procedure name. @@ -25604,6 +25764,7 @@ class SqlSink(CopySink): 'sink_retry_count': {'key': 'sinkRetryCount', 'type': 'object'}, 'sink_retry_wait': {'key': 'sinkRetryWait', 'type': 'object'}, 'max_concurrent_connections': {'key': 'maxConcurrentConnections', 'type': 'object'}, + 'table_option': {'key': 'tableOption', 'type': 'object'}, 'type': {'key': 'type', 'type': 'str'}, 'sql_writer_stored_procedure_name': {'key': 'sqlWriterStoredProcedureName', 'type': 'object'}, 'sql_writer_table_type': {'key': 'sqlWriterTableType', 'type': 'object'}, @@ -25612,8 +25773,8 @@ class SqlSink(CopySink): 'stored_procedure_table_type_parameter_name': {'key': 'storedProcedureTableTypeParameterName', 'type': 'object'}, } - def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, sql_writer_stored_procedure_name=None, sql_writer_table_type=None, pre_copy_script=None, stored_procedure_parameters=None, stored_procedure_table_type_parameter_name=None, **kwargs) -> None: - super(SqlSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, 
sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, **kwargs) + def __init__(self, *, additional_properties=None, write_batch_size=None, write_batch_timeout=None, sink_retry_count=None, sink_retry_wait=None, max_concurrent_connections=None, table_option=None, sql_writer_stored_procedure_name=None, sql_writer_table_type=None, pre_copy_script=None, stored_procedure_parameters=None, stored_procedure_table_type_parameter_name=None, **kwargs) -> None: + super(SqlSink, self).__init__(additional_properties=additional_properties, write_batch_size=write_batch_size, write_batch_timeout=write_batch_timeout, sink_retry_count=sink_retry_count, sink_retry_wait=sink_retry_wait, max_concurrent_connections=max_concurrent_connections, table_option=table_option, **kwargs) self.sql_writer_stored_procedure_name = sql_writer_stored_procedure_name self.sql_writer_table_type = sql_writer_table_type self.pre_copy_script = pre_copy_script @@ -27233,6 +27394,36 @@ def __init__(self, *, value, continuation_token: str=None, **kwargs) -> None: self.continuation_token = continuation_token +class TriggerSubscriptionOperationStatus(Model): + """Defines the response of a trigger subscription operation. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar trigger_name: Trigger name. + :vartype trigger_name: str + :ivar status: Event Subscription Status. 
Possible values include: + 'Enabled', 'Provisioning', 'Deprovisioning', 'Disabled', 'Unknown' + :vartype status: str or + ~azure.mgmt.datafactory.models.EventSubscriptionStatus + """ + + _validation = { + 'trigger_name': {'readonly': True}, + 'status': {'readonly': True}, + } + + _attribute_map = { + 'trigger_name': {'key': 'triggerName', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + } + + def __init__(self, **kwargs) -> None: + super(TriggerSubscriptionOperationStatus, self).__init__(**kwargs) + self.trigger_name = None + self.status = None + + class TumblingWindowTrigger(Trigger): """Trigger that schedules pipeline runs for all fixed time interval windows from a start time without gaps and also supports backfill scenarios (when diff --git a/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/operations/_triggers_operations.py b/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/operations/_triggers_operations.py index caeda2fcdc91..57e31b1bd8c9 100644 --- a/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/operations/_triggers_operations.py +++ b/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/operations/_triggers_operations.py @@ -318,6 +318,268 @@ def delete( delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/triggers/{triggerName}'} + def _subscribe_to_events_initial( + self, resource_group_name, factory_name, trigger_name, custom_headers=None, raw=False, **operation_config): + # Construct URL + url = self.subscribe_to_events.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), + 'factoryName': self._serialize.url("factory_name", factory_name, 'str', 
max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'), + 'triggerName': self._serialize.url("trigger_name", trigger_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('TriggerSubscriptionOperationStatus', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + def subscribe_to_events( + self, resource_group_name, factory_name, trigger_name, custom_headers=None, raw=False, polling=True, **operation_config): + """Subscribe event trigger to events. + + :param resource_group_name: The resource group name. + :type resource_group_name: str + :param factory_name: The factory name. + :type factory_name: str + :param trigger_name: The trigger name. 
+ :type trigger_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns + TriggerSubscriptionOperationStatus or + ClientRawResponse if raw==True + :rtype: + ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.datafactory.models.TriggerSubscriptionOperationStatus] + or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.datafactory.models.TriggerSubscriptionOperationStatus]] + :raises: :class:`CloudError` + """ + raw_result = self._subscribe_to_events_initial( + resource_group_name=resource_group_name, + factory_name=factory_name, + trigger_name=trigger_name, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + deserialized = self._deserialize('TriggerSubscriptionOperationStatus', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + subscribe_to_events.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/triggers/{triggerName}/subscribeToEvents'} + + def get_event_subscription_status( + self, resource_group_name, factory_name, trigger_name, custom_headers=None, raw=False, **operation_config): + """Get 
a trigger's event subscription status. + + :param resource_group_name: The resource group name. + :type resource_group_name: str + :param factory_name: The factory name. + :type factory_name: str + :param trigger_name: The trigger name. + :type trigger_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: TriggerSubscriptionOperationStatus or ClientRawResponse if + raw=true + :rtype: + ~azure.mgmt.datafactory.models.TriggerSubscriptionOperationStatus or + ~msrest.pipeline.ClientRawResponse + :raises: :class:`CloudError` + """ + # Construct URL + url = self.get_event_subscription_status.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), + 'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'), + 'triggerName': self._serialize.url("trigger_name", trigger_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = 
self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('TriggerSubscriptionOperationStatus', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_event_subscription_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/triggers/{triggerName}/getEventSubscriptionStatus'} + + + def _unsubscribe_from_events_initial( + self, resource_group_name, factory_name, trigger_name, custom_headers=None, raw=False, **operation_config): + # Construct URL + url = self.unsubscribe_from_events.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), + 'factoryName': self._serialize.url("factory_name", factory_name, 'str', max_length=63, min_length=3, pattern=r'^[A-Za-z0-9]+(?:-[A-Za-z0-9]+)*$'), + 'triggerName': self._serialize.url("trigger_name", trigger_name, 'str', max_length=260, min_length=1, pattern=r'^[A-Za-z0-9_][^<>*#.%&:\\+?/]*$') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + 
header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202]: + exp = CloudError(response) + exp.request_id = response.headers.get('x-ms-request-id') + raise exp + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('TriggerSubscriptionOperationStatus', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + def unsubscribe_from_events( + self, resource_group_name, factory_name, trigger_name, custom_headers=None, raw=False, polling=True, **operation_config): + """Unsubscribe event trigger from events. + + :param resource_group_name: The resource group name. + :type resource_group_name: str + :param factory_name: The factory name. + :type factory_name: str + :param trigger_name: The trigger name. 
+ :type trigger_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns + TriggerSubscriptionOperationStatus or + ClientRawResponse if raw==True + :rtype: + ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.datafactory.models.TriggerSubscriptionOperationStatus] + or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.datafactory.models.TriggerSubscriptionOperationStatus]] + :raises: :class:`CloudError` + """ + raw_result = self._unsubscribe_from_events_initial( + resource_group_name=resource_group_name, + factory_name=factory_name, + trigger_name=trigger_name, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + deserialized = self._deserialize('TriggerSubscriptionOperationStatus', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + unsubscribe_from_events.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/triggers/{triggerName}/unsubscribeFromEvents'} + + def _start_initial( self, resource_group_name, factory_name, trigger_name, custom_headers=None, raw=False, **operation_config): # Construct URL