diff --git a/.changes/0ea738da-3935-4cdc-b1a2-591f70831d15.json b/.changes/0ea738da-3935-4cdc-b1a2-591f70831d15.json new file mode 100644 index 00000000000..7854573169c --- /dev/null +++ b/.changes/0ea738da-3935-4cdc-b1a2-591f70831d15.json @@ -0,0 +1,5 @@ +{ + "id": "0ea738da-3935-4cdc-b1a2-591f70831d15", + "type": "misc", + "description": "sync AWS models and upgrade smithy kotlin" +} \ No newline at end of file diff --git a/codegen/sdk/aws-models/amplifyuibuilder.json b/codegen/sdk/aws-models/amplifyuibuilder.json index 4fd9dc12887..66369971de2 100644 --- a/codegen/sdk/aws-models/amplifyuibuilder.json +++ b/codegen/sdk/aws-models/amplifyuibuilder.json @@ -1011,6 +1011,44 @@ "target": "smithy.api#String" } }, + "com.amazonaws.amplifyuibuilder#CodegenDependencies": { + "type": "list", + "member": { + "target": "com.amazonaws.amplifyuibuilder#CodegenDependency" + } + }, + "com.amazonaws.amplifyuibuilder#CodegenDependency": { + "type": "structure", + "members": { + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "
Name of the dependency package.
" + } + }, + "supportedVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "Indicates the version of the supported dependency package.
" + } + }, + "isSemVer": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "Determines if the dependency package is using Semantic versioning. If set to true, it indicates that the dependency package uses Semantic versioning.
" + } + }, + "reason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "Indicates the reason to include the dependency package in your project code.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Dependency package that may be required for the project code to run.
" + } + }, "com.amazonaws.amplifyuibuilder#CodegenFeatureFlags": { "type": "structure", "members": { @@ -1452,6 +1490,12 @@ "smithy.api#documentation": "The time that the code generation job was modified.
", "smithy.api#timestampFormat": "date-time" } + }, + "dependencies": { + "target": "com.amazonaws.amplifyuibuilder#CodegenDependencies", + "traits": { + "smithy.api#documentation": "Lists the dependency packages that may be required for the project code to run.
" + } } }, "traits": { @@ -3027,7 +3071,7 @@ } ], "traits": { - "smithy.api#documentation": "Exchanges an access code for a token.
", + "smithy.api#documentation": "This is for internal use.
\nAmplify uses this action to exchange an access code for a token.
", "smithy.api#http": { "uri": "/tokens/{provider}", "method": "POST" @@ -5246,6 +5290,18 @@ } } }, + "com.amazonaws.amplifyuibuilder#ReactCodegenDependencies": { + "type": "map", + "key": { + "target": "smithy.api#String" + }, + "value": { + "target": "smithy.api#String" + }, + "traits": { + "smithy.api#documentation": "Indicates the dependency version of the project code to be generated by Amazon Web Services Amplify. The version of the generated code output is determined by the version number contained in aws-amplify
.
The API configuration for the code generation job.
" } + }, + "dependencies": { + "target": "com.amazonaws.amplifyuibuilder#ReactCodegenDependencies", + "traits": { + "smithy.api#documentation": "Lists the dependency packages that may be required for the project code to run.
" + } } }, "traits": { @@ -5306,7 +5368,7 @@ } ], "traits": { - "smithy.api#documentation": "Refreshes a previously issued access token that might have expired.
", + "smithy.api#documentation": "This is for internal use.
\nAmplify uses this action to refresh a previously issued access token that might have expired.
", "smithy.api#http": { "uri": "/tokens/{provider}/refresh", "method": "POST" diff --git a/codegen/sdk/aws-models/appconfig.json b/codegen/sdk/aws-models/appconfig.json index d5a67001b2e..7af8cbec1b0 100644 --- a/codegen/sdk/aws-models/appconfig.json +++ b/codegen/sdk/aws-models/appconfig.json @@ -1673,6 +1673,9 @@ }, { "target": "com.amazonaws.appconfig#InternalServerException" + }, + { + "target": "com.amazonaws.appconfig#ServiceQuotaExceededException" } ], "traits": { @@ -1743,6 +1746,9 @@ }, { "target": "com.amazonaws.appconfig#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.appconfig#ServiceQuotaExceededException" } ], "traits": { @@ -1800,7 +1806,7 @@ "LocationUri": { "target": "com.amazonaws.appconfig#Uri", "traits": { - "smithy.api#documentation": "A URI to locate the configuration. You can specify the following:
\nFor the AppConfig hosted configuration store and for feature flags,\n specify hosted
.
For an Amazon Web Services Systems Manager Parameter Store parameter, specify either the parameter name in\n the format ssm-parameter://
or the ARN.
For an Secrets Manager secret, specify the URI in the following format:\n secrets-manager
://
For an Amazon S3 object, specify the URI in the following format:\n s3://
. Here is an example:\n s3://my-bucket/my-app/us-east-1/my-config.json
\n
For an SSM document, specify either the document name in the format\n ssm-document://
or the Amazon Resource Name\n (ARN).
A URI to locate the configuration. You can specify the following:
\nFor the AppConfig hosted configuration store and for feature flags,\n specify hosted
.
For an Amazon Web Services Systems Manager Parameter Store parameter, specify either the parameter name in\n the format ssm-parameter://
or the ARN.
For an Amazon Web Services\n CodePipeline pipeline, specify the URI in the following format:\n codepipeline
://
For an Secrets Manager secret, specify the URI in the following format:\n secretsmanager
://
For an Amazon S3 object, specify the URI in the following format:\n s3://
. Here is an example:\n s3://my-bucket/my-app/us-east-1/my-config.json
\n
For an SSM document, specify either the document name in the format\n ssm-document://
or the Amazon Resource Name\n (ARN).
Creates an AppConfig extension. An extension augments your ability to inject\n logic or behavior at different points during the AppConfig workflow of creating\n or deploying a configuration.
\nYou can create your own extensions or use the Amazon Web Services authored extensions provided by\n AppConfig. For most use cases, to create your own extension, you must create\n an Lambda function to perform any computation and processing defined in the\n extension. For more information about extensions, see Working with\n AppConfig extensions in the\n AppConfig User Guide.
", + "smithy.api#documentation": "Creates an AppConfig extension. An extension augments your ability to inject\n logic or behavior at different points during the AppConfig workflow of creating\n or deploying a configuration.
\nYou can create your own extensions or use the Amazon Web Services authored extensions provided by\n AppConfig. For an AppConfig extension that uses Lambda, you must create a Lambda function to perform any computation and processing\n defined in the extension. If you plan to create custom versions of the Amazon Web Services\n authored notification extensions, you only need to specify an Amazon Resource Name (ARN) in\n the Uri
field for the new extension version.
For a custom EventBridge notification extension, enter the ARN of the EventBridge\n default events in the Uri
field.
For a custom Amazon SNS notification extension, enter the ARN of an Amazon SNS\n topic in the Uri
field.
For a custom Amazon SQS notification extension, enter the ARN of an Amazon SQS\n message queue in the Uri
field.
For more information about extensions, see Working with\n AppConfig extensions in the\n AppConfig User Guide.
", "smithy.api#http": { "method": "POST", "uri": "/extensions", @@ -2286,7 +2298,7 @@ "VersionLabel": { "target": "com.amazonaws.appconfig#VersionLabel", "traits": { - "smithy.api#documentation": "An optional, user-defined label for the AppConfig hosted configuration version. This value must contain at least one non-numeric character. For example, \"v2.2.0\".
", + "smithy.api#documentation": "An optional, user-defined label for the AppConfig hosted configuration\n version. This value must contain at least one non-numeric character. For example,\n \"v2.2.0\".
", "smithy.api#httpHeader": "VersionLabel" } } @@ -2832,6 +2844,12 @@ "traits": { "smithy.api#documentation": "The KMS key identifier (key ID, key alias, or key ARN). AppConfig uses this ID to encrypt the configuration data using a customer managed key.
" } + }, + "VersionLabel": { + "target": "com.amazonaws.appconfig#VersionLabel", + "traits": { + "smithy.api#documentation": "A user-defined label for an AppConfig hosted configuration version.
" + } } } }, @@ -2853,7 +2871,7 @@ "Description": { "target": "com.amazonaws.appconfig#Description", "traits": { - "smithy.api#documentation": "A description of the deployment event. Descriptions include, but are not limited to, the\n user account or the Amazon CloudWatch alarm ARN that initiated a rollback, the percentage of hosts\n that received the deployment, or in the case of an internal error, a recommendation to\n attempt a new deployment.
" + "smithy.api#documentation": "A description of the deployment event. Descriptions include, but are not limited to, the\n following:
\nThe Amazon Web Services account or the Amazon CloudWatch alarm ARN that initiated a rollback.
\nThe percentage of hosts that received the deployment.
\nA recommendation to attempt a new deployment (in the case of an internal\n error).
\nTime the deployment completed.
" } + }, + "VersionLabel": { + "target": "com.amazonaws.appconfig#VersionLabel", + "traits": { + "smithy.api#documentation": "A user-defined label for an AppConfig hosted configuration version.
" + } } }, "traits": { @@ -4321,6 +4345,7 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", + "items": "Items", "pageSize": "MaxResults" } } @@ -4396,6 +4421,7 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", + "items": "Items", "pageSize": "MaxResults" } } @@ -4483,6 +4509,7 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", + "items": "Items", "pageSize": "MaxResults" } } @@ -4539,6 +4566,7 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", + "items": "Items", "pageSize": "MaxResults" } } @@ -4630,6 +4658,7 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", + "items": "Items", "pageSize": "MaxResults" } } @@ -4691,6 +4720,7 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", + "items": "Items", "pageSize": "MaxResults" } } @@ -4766,6 +4796,7 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", + "items": "Items", "pageSize": "MaxResults" } } @@ -4849,6 +4880,7 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", + "items": "Items", "pageSize": "MaxResults" } } @@ -4890,7 +4922,7 @@ "VersionLabel": { "target": "com.amazonaws.appconfig#QueryName", "traits": { - "smithy.api#documentation": "An optional filter that can be used to specify the version label of an AppConfig hosted configuration version. This parameter supports filtering by prefix using a wildcard, for example \"v2*\". If you don't specify an asterisk at the end of the value, only an exact match is returned.
", + "smithy.api#documentation": "An optional filter that can be used to specify the version label of an AppConfig hosted configuration version. This parameter supports filtering by prefix using a\n wildcard, for example \"v2*\". If you don't specify an asterisk at the end of the value, only\n an exact match is returned.
", "smithy.api#httpQuery": "version_label" } } @@ -5197,7 +5229,7 @@ } }, "traits": { - "smithy.api#documentation": "The number of hosted configuration versions exceeds the limit for the AppConfig hosted configuration store. Delete one or more versions and try again.
", + "smithy.api#documentation": "The number of one more AppConfig resources exceeds the maximum allowed. Verify that your\n environment doesn't exceed the following service quotas:
\nApplications: 100 max
\nDeployment strategies: 20 max
\nConfiguration profiles: 100 max per application
\nEnvironments: 20 max per application
\nTo resolve this issue, you can delete one or more resources and try again. Or, you\n can request a quota increase. For more information about quotas and to request an increase,\n see Service quotas for AppConfig in the Amazon Web Services General Reference.
", "smithy.api#error": "client", "smithy.api#httpError": 402 } @@ -5269,7 +5301,7 @@ "ConfigurationVersion": { "target": "com.amazonaws.appconfig#Version", "traits": { - "smithy.api#documentation": "The configuration version to deploy. If deploying an AppConfig hosted configuration version, you can specify either the version number or version label.
", + "smithy.api#documentation": "The configuration version to deploy. If deploying an AppConfig hosted\n configuration version, you can specify either the version number or version label. For all\n other configurations, you must specify the version number.
", "smithy.api#required": {} } }, diff --git a/codegen/sdk/aws-models/appintegrations.json b/codegen/sdk/aws-models/appintegrations.json index bca0a85f1f4..d5ec1f77fe5 100644 --- a/codegen/sdk/aws-models/appintegrations.json +++ b/codegen/sdk/aws-models/appintegrations.json @@ -46,6 +46,9 @@ "type": "service", "version": "2020-07-29", "operations": [ + { + "target": "com.amazonaws.appintegrations#CreateApplication" + }, { "target": "com.amazonaws.appintegrations#CreateDataIntegration" }, @@ -58,12 +61,18 @@ { "target": "com.amazonaws.appintegrations#DeleteEventIntegration" }, + { + "target": "com.amazonaws.appintegrations#GetApplication" + }, { "target": "com.amazonaws.appintegrations#GetDataIntegration" }, { "target": "com.amazonaws.appintegrations#GetEventIntegration" }, + { + "target": "com.amazonaws.appintegrations#ListApplications" + }, { "target": "com.amazonaws.appintegrations#ListDataIntegrationAssociations" }, @@ -85,6 +94,9 @@ { "target": "com.amazonaws.appintegrations#UntagResource" }, + { + "target": "com.amazonaws.appintegrations#UpdateApplication" + }, { "target": "com.amazonaws.appintegrations#UpdateDataIntegration" }, @@ -853,6 +865,118 @@ } } }, + "com.amazonaws.appintegrations#ApplicationApprovedOrigins": { + "type": "list", + "member": { + "target": "com.amazonaws.appintegrations#ApplicationTrustedSource" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 50 + } + } + }, + "com.amazonaws.appintegrations#ApplicationName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^[a-zA-Z0-9\\/\\._ \\-]+$" + } + }, + "com.amazonaws.appintegrations#ApplicationNamespace": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 32 + }, + "smithy.api#pattern": "^[a-zA-Z0-9\\/\\._\\-]+$" + } + }, + "com.amazonaws.appintegrations#ApplicationSourceConfig": { + "type": "structure", + "members": { + "ExternalUrlConfig": { + "target": 
"com.amazonaws.appintegrations#ExternalUrlConfig", + "traits": { + "smithy.api#documentation": "The external URL source for the application.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The configuration for where the application should be loaded from.
" + } + }, + "com.amazonaws.appintegrations#ApplicationSummary": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.appintegrations#Arn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Application.
" + } + }, + "Id": { + "target": "com.amazonaws.appintegrations#UUID", + "traits": { + "smithy.api#documentation": "A unique identifier for the Application.
" + } + }, + "Name": { + "target": "com.amazonaws.appintegrations#ApplicationName", + "traits": { + "smithy.api#documentation": "The name of the application.
" + } + }, + "Namespace": { + "target": "com.amazonaws.appintegrations#ApplicationNamespace", + "traits": { + "smithy.api#documentation": "The namespace of the application.
" + } + }, + "CreatedTime": { + "target": "com.amazonaws.appintegrations#Timestamp", + "traits": { + "smithy.api#documentation": "The time when the application was created.
" + } + }, + "LastModifiedTime": { + "target": "com.amazonaws.appintegrations#Timestamp", + "traits": { + "smithy.api#documentation": "The time when the application was last modified.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Summary information about the Application.
" + } + }, + "com.amazonaws.appintegrations#ApplicationTrustedSource": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^\\w+\\:\\/\\/.*$" + } + }, + "com.amazonaws.appintegrations#ApplicationsList": { + "type": "list", + "member": { + "target": "com.amazonaws.appintegrations#ApplicationSummary" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 50 + } + } + }, "com.amazonaws.appintegrations#Arn": { "type": "string", "traits": { @@ -863,6 +987,16 @@ "smithy.api#pattern": "^arn:aws:[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$" } }, + "com.amazonaws.appintegrations#ArnOrUUID": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^(arn:aws:[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}|[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})(:[\\w\\$]+)?$" + } + }, "com.amazonaws.appintegrations#ClientAssociationMetadata": { "type": "map", "key": { @@ -882,6 +1016,143 @@ "smithy.api#pattern": ".*" } }, + "com.amazonaws.appintegrations#CreateApplication": { + "type": "operation", + "input": { + "target": "com.amazonaws.appintegrations#CreateApplicationRequest" + }, + "output": { + "target": "com.amazonaws.appintegrations#CreateApplicationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.appintegrations#AccessDeniedException" + }, + { + "target": "com.amazonaws.appintegrations#DuplicateResourceException" + }, + { + "target": "com.amazonaws.appintegrations#InternalServiceError" + }, + { + "target": "com.amazonaws.appintegrations#InvalidRequestException" + }, + { + "target": "com.amazonaws.appintegrations#ResourceQuotaExceededException" + }, + { + "target": "com.amazonaws.appintegrations#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "This API is 
in preview release and subject to change.
\nCreates and persists an Application resource.
", + "smithy.api#examples": [ + { + "title": "To create an application", + "documentation": "The following creates an application named My Application with access url https://example.com.", + "input": { + "Name": "My Application", + "Namespace": "myapplication", + "Description": "My first application.", + "ApplicationSourceConfig": { + "ExternalUrlConfig": { + "AccessUrl": "https://example.com" + } + } + }, + "output": { + "Id": "98542c53-e8ac-4570-9c85-c6552c8d9c5e", + "Arn": "arn:aws:app-integrations:us-west-2:0123456789012:application/98542c53-e8ac-4570-9c85-c6552c8d9c5e" + } + } + ], + "smithy.api#http": { + "method": "POST", + "uri": "/applications", + "code": 200 + } + } + }, + "com.amazonaws.appintegrations#CreateApplicationRequest": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.appintegrations#ApplicationName", + "traits": { + "smithy.api#documentation": "The name of the application.
", + "smithy.api#required": {} + } + }, + "Namespace": { + "target": "com.amazonaws.appintegrations#ApplicationNamespace", + "traits": { + "smithy.api#documentation": "The namespace of the application.
", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.appintegrations#Description", + "traits": { + "smithy.api#documentation": "The description of the application.
" + } + }, + "ApplicationSourceConfig": { + "target": "com.amazonaws.appintegrations#ApplicationSourceConfig", + "traits": { + "smithy.api#documentation": "The configuration for where the application should be loaded from.
", + "smithy.api#required": {} + } + }, + "Subscriptions": { + "target": "com.amazonaws.appintegrations#SubscriptionList", + "traits": { + "smithy.api#documentation": "The events that the application subscribes.
" + } + }, + "Publications": { + "target": "com.amazonaws.appintegrations#PublicationList", + "traits": { + "smithy.api#documentation": "The events that the application publishes.
" + } + }, + "ClientToken": { + "target": "com.amazonaws.appintegrations#IdempotencyToken", + "traits": { + "smithy.api#documentation": "A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If not provided, the Amazon Web Services\n SDK populates this field. For more information about idempotency, see\n Making retries safe with idempotent APIs.
", + "smithy.api#idempotencyToken": {} + } + }, + "Tags": { + "target": "com.amazonaws.appintegrations#TagMap", + "traits": { + "smithy.api#documentation": "The tags used to organize, track, or control access for this resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.appintegrations#CreateApplicationResponse": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.appintegrations#Arn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Application.
" + } + }, + "Id": { + "target": "com.amazonaws.appintegrations#UUID", + "traits": { + "smithy.api#documentation": "A unique identifier for the Application.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.appintegrations#CreateDataIntegration": { "type": "operation", "input": { @@ -1392,6 +1663,16 @@ "smithy.api#pattern": "^[a-zA-Z0-9\\/\\._\\-]+$" } }, + "com.amazonaws.appintegrations#EventDefinitionSchema": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10240 + }, + "smithy.api#pattern": "^.*$" + } + }, "com.amazonaws.appintegrations#EventFilter": { "type": "structure", "members": { @@ -1519,6 +1800,37 @@ } } }, + "com.amazonaws.appintegrations#EventName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^[a-zA-Z0-9\\/\\._\\-]+::[a-zA-Z0-9\\/\\._\\-]+(?:\\*)?$" + } + }, + "com.amazonaws.appintegrations#ExternalUrlConfig": { + "type": "structure", + "members": { + "AccessUrl": { + "target": "com.amazonaws.appintegrations#URL", + "traits": { + "smithy.api#documentation": "The URL to access the application.
", + "smithy.api#required": {} + } + }, + "ApprovedOrigins": { + "target": "com.amazonaws.appintegrations#ApplicationApprovedOrigins", + "traits": { + "smithy.api#documentation": "Additional URLs to allow list if different than the access URL.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The external URL source for the application.
" + } + }, "com.amazonaws.appintegrations#Fields": { "type": "string", "traits": { @@ -1583,13 +1895,13 @@ } } }, - "com.amazonaws.appintegrations#GetDataIntegration": { + "com.amazonaws.appintegrations#GetApplication": { "type": "operation", "input": { - "target": "com.amazonaws.appintegrations#GetDataIntegrationRequest" + "target": "com.amazonaws.appintegrations#GetApplicationRequest" }, "output": { - "target": "com.amazonaws.appintegrations#GetDataIntegrationResponse" + "target": "com.amazonaws.appintegrations#GetApplicationResponse" }, "errors": [ { @@ -1609,21 +1921,40 @@ } ], "traits": { - "smithy.api#documentation": "Returns information about the DataIntegration.
\nYou cannot create a DataIntegration association for a DataIntegration that has been previously associated. \nUse a different DataIntegration, or recreate the DataIntegration using the \nCreateDataIntegration API.
\nThis API is in preview release and subject to change.
\nGet an Application resource.
", + "smithy.api#examples": [ + { + "title": "To get an application", + "documentation": "The following retrives an application.", + "input": { + "Arn": "arn:aws:app-integrations:us-west-2:0123456789012:application/98542c53-e8ac-4570-9c85-c6552c8d9c5e" + }, + "output": { + "Name": "My Application", + "Namespace": "myapplication", + "Description": "My first application.", + "ApplicationSourceConfig": { + "ExternalUrlConfig": { + "AccessUrl": "https://example.com" + } + } + } + } + ], "smithy.api#http": { "method": "GET", - "uri": "/dataIntegrations/{Identifier}", + "uri": "/applications/{Arn}", "code": 200 } } }, - "com.amazonaws.appintegrations#GetDataIntegrationRequest": { + "com.amazonaws.appintegrations#GetApplicationRequest": { "type": "structure", "members": { - "Identifier": { - "target": "com.amazonaws.appintegrations#Identifier", + "Arn": { + "target": "com.amazonaws.appintegrations#ArnOrUUID", "traits": { - "smithy.api#documentation": "A unique identifier.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Application.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1633,47 +1964,171 @@ "smithy.api#input": {} } }, - "com.amazonaws.appintegrations#GetDataIntegrationResponse": { + "com.amazonaws.appintegrations#GetApplicationResponse": { "type": "structure", "members": { "Arn": { "target": "com.amazonaws.appintegrations#Arn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) for the DataIntegration.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Application.
" } }, "Id": { "target": "com.amazonaws.appintegrations#UUID", "traits": { - "smithy.api#documentation": "A unique identifier.
" + "smithy.api#documentation": "A unique identifier for the Application.
" } }, "Name": { - "target": "com.amazonaws.appintegrations#Name", + "target": "com.amazonaws.appintegrations#ApplicationName", "traits": { - "smithy.api#documentation": "The name of the DataIntegration.
" + "smithy.api#documentation": "The name of the application.
" + } + }, + "Namespace": { + "target": "com.amazonaws.appintegrations#ApplicationNamespace", + "traits": { + "smithy.api#documentation": "The namespace of the application.
" } }, "Description": { "target": "com.amazonaws.appintegrations#Description", "traits": { - "smithy.api#documentation": "The KMS key for the DataIntegration.
" + "smithy.api#documentation": "The description of the application.
" } }, - "KmsKey": { - "target": "com.amazonaws.appintegrations#NonBlankString", + "ApplicationSourceConfig": { + "target": "com.amazonaws.appintegrations#ApplicationSourceConfig", "traits": { - "smithy.api#documentation": "The KMS key for the DataIntegration.
" + "smithy.api#documentation": "The configuration for where the application should be loaded from.
" } }, - "SourceURI": { - "target": "com.amazonaws.appintegrations#SourceURI", + "Subscriptions": { + "target": "com.amazonaws.appintegrations#SubscriptionList", "traits": { - "smithy.api#documentation": "The URI of the data source.
" + "smithy.api#documentation": "The events that the application subscribes.
" } }, - "ScheduleConfiguration": { - "target": "com.amazonaws.appintegrations#ScheduleConfiguration", + "Publications": { + "target": "com.amazonaws.appintegrations#PublicationList", + "traits": { + "smithy.api#documentation": "The events that the application publishes.
" + } + }, + "CreatedTime": { + "target": "com.amazonaws.appintegrations#Timestamp", + "traits": { + "smithy.api#documentation": "The created time of the Application.
" + } + }, + "LastModifiedTime": { + "target": "com.amazonaws.appintegrations#Timestamp", + "traits": { + "smithy.api#documentation": "The last modified time of the Application.
" + } + }, + "Tags": { + "target": "com.amazonaws.appintegrations#TagMap", + "traits": { + "smithy.api#documentation": "The tags used to organize, track, or control access for this resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.appintegrations#GetDataIntegration": { + "type": "operation", + "input": { + "target": "com.amazonaws.appintegrations#GetDataIntegrationRequest" + }, + "output": { + "target": "com.amazonaws.appintegrations#GetDataIntegrationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.appintegrations#AccessDeniedException" + }, + { + "target": "com.amazonaws.appintegrations#InternalServiceError" + }, + { + "target": "com.amazonaws.appintegrations#InvalidRequestException" + }, + { + "target": "com.amazonaws.appintegrations#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.appintegrations#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "Returns information about the DataIntegration.
\nYou cannot create a DataIntegration association for a DataIntegration that has been previously associated. \nUse a different DataIntegration, or recreate the DataIntegration using the \nCreateDataIntegration API.
\nA unique identifier.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.appintegrations#GetDataIntegrationResponse": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.appintegrations#Arn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) for the DataIntegration.
" + } + }, + "Id": { + "target": "com.amazonaws.appintegrations#UUID", + "traits": { + "smithy.api#documentation": "A unique identifier.
" + } + }, + "Name": { + "target": "com.amazonaws.appintegrations#Name", + "traits": { + "smithy.api#documentation": "The name of the DataIntegration.
" + } + }, + "Description": { + "target": "com.amazonaws.appintegrations#Description", + "traits": { + "smithy.api#documentation": "The KMS key for the DataIntegration.
" + } + }, + "KmsKey": { + "target": "com.amazonaws.appintegrations#NonBlankString", + "traits": { + "smithy.api#documentation": "The KMS key for the DataIntegration.
" + } + }, + "SourceURI": { + "target": "com.amazonaws.appintegrations#SourceURI", + "traits": { + "smithy.api#documentation": "The URI of the data source.
" + } + }, + "ScheduleConfiguration": { + "target": "com.amazonaws.appintegrations#ScheduleConfiguration", "traits": { "smithy.api#documentation": "The name of the data and how often it should be pulled from the source.
" } @@ -1841,6 +2296,105 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.appintegrations#ListApplications": { + "type": "operation", + "input": { + "target": "com.amazonaws.appintegrations#ListApplicationsRequest" + }, + "output": { + "target": "com.amazonaws.appintegrations#ListApplicationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.appintegrations#AccessDeniedException" + }, + { + "target": "com.amazonaws.appintegrations#InternalServiceError" + }, + { + "target": "com.amazonaws.appintegrations#InvalidRequestException" + }, + { + "target": "com.amazonaws.appintegrations#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "This API is in preview release and subject to change.
\nLists applications in the account.
", + "smithy.api#examples": [ + { + "title": "To list applications in the account", + "documentation": "The following lists application summary in the account.", + "input": { + "MaxResults": 1 + }, + "output": { + "Applications": [ + { + "Id": "98542c53-e8ac-4570-9c85-c6552c8d9c5e", + "Arn": "arn:aws:app-integrations:us-west-2:0123456789012:application/98542c53-e8ac-4570-9c85-c6552c8d9c5e", + "Name": "My Application", + "Namespace": "myapplication" + } + ], + "NextToken": "abc" + } + } + ], + "smithy.api#http": { + "method": "GET", + "uri": "/applications", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "Applications", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.appintegrations#ListApplicationsRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.appintegrations#NextToken", + "traits": { + "smithy.api#documentation": "The token for the next set of results. Use the value returned in the previous \nresponse in the next request to retrieve the next set of results.
", + "smithy.api#httpQuery": "nextToken" + } + }, + "MaxResults": { + "target": "com.amazonaws.appintegrations#MaxResults", + "traits": { + "smithy.api#documentation": "The maximum number of results to return per page.
", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.appintegrations#ListApplicationsResponse": { + "type": "structure", + "members": { + "Applications": { + "target": "com.amazonaws.appintegrations#ApplicationsList", + "traits": { + "smithy.api#documentation": "The Applications associated with this account.
" + } + }, + "NextToken": { + "target": "com.amazonaws.appintegrations#NextToken", + "traits": { + "smithy.api#documentation": "If there are additional results, this is the token for the next set of results.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.appintegrations#ListDataIntegrationAssociations": { "type": "operation", "input": { @@ -1872,6 +2426,12 @@ "method": "GET", "uri": "/dataIntegrations/{DataIntegrationIdentifier}/associations", "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "DataIntegrationAssociations", + "pageSize": "MaxResults" } } }, @@ -1953,6 +2513,12 @@ "method": "GET", "uri": "/dataIntegrations", "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "DataIntegrations", + "pageSize": "MaxResults" } } }, @@ -2029,6 +2595,12 @@ "method": "GET", "uri": "/eventIntegrations/{EventIntegrationName}/associations", "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "EventIntegrationAssociations", + "pageSize": "MaxResults" } } }, @@ -2110,6 +2682,12 @@ "method": "GET", "uri": "/eventIntegrations", "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "EventIntegrations", + "pageSize": "MaxResults" } } }, @@ -2290,6 +2868,46 @@ "smithy.api#documentation": "The configuration for what data should be pulled from the source.
" } }, + "com.amazonaws.appintegrations#Publication": { + "type": "structure", + "members": { + "Event": { + "target": "com.amazonaws.appintegrations#EventName", + "traits": { + "smithy.api#documentation": "The name of the publication.
", + "smithy.api#required": {} + } + }, + "Schema": { + "target": "com.amazonaws.appintegrations#EventDefinitionSchema", + "traits": { + "smithy.api#documentation": "The JSON schema of the publication event.
", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.appintegrations#Description", + "traits": { + "smithy.api#documentation": "The description of the publication.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The configuration of an event that the application publishes.
" + } + }, + "com.amazonaws.appintegrations#PublicationList": { + "type": "list", + "member": { + "target": "com.amazonaws.appintegrations#Publication" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, "com.amazonaws.appintegrations#ResourceNotFoundException": { "type": "structure", "members": { @@ -2363,6 +2981,39 @@ "smithy.api#pattern": "^(\\w+\\:\\/\\/[\\w.-]+[\\w/!@#+=.-]+$)|(\\w+\\:\\/\\/[\\w.-]+[\\w/!@#+=.-]+[\\w/!@#+=.-]+[\\w/!@#+=.,-]+$)$" } }, + "com.amazonaws.appintegrations#Subscription": { + "type": "structure", + "members": { + "Event": { + "target": "com.amazonaws.appintegrations#EventName", + "traits": { + "smithy.api#documentation": "The name of the subscription.
", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.appintegrations#Description", + "traits": { + "smithy.api#documentation": "The description of the subscription.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The configuration of an event that the application subscribes.
" + } + }, + "com.amazonaws.appintegrations#SubscriptionList": { + "type": "list", + "member": { + "target": "com.amazonaws.appintegrations#Subscription" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, "com.amazonaws.appintegrations#TagKey": { "type": "string", "traits": { @@ -2483,6 +3134,19 @@ "smithy.api#httpError": 429 } }, + "com.amazonaws.appintegrations#Timestamp": { + "type": "timestamp" + }, + "com.amazonaws.appintegrations#URL": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1000 + }, + "smithy.api#pattern": "^\\w+\\:\\/\\/.*$" + } + }, "com.amazonaws.appintegrations#UUID": { "type": "string", "traits": { @@ -2551,6 +3215,104 @@ "smithy.api#output": {} } }, + "com.amazonaws.appintegrations#UpdateApplication": { + "type": "operation", + "input": { + "target": "com.amazonaws.appintegrations#UpdateApplicationRequest" + }, + "output": { + "target": "com.amazonaws.appintegrations#UpdateApplicationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.appintegrations#AccessDeniedException" + }, + { + "target": "com.amazonaws.appintegrations#InternalServiceError" + }, + { + "target": "com.amazonaws.appintegrations#InvalidRequestException" + }, + { + "target": "com.amazonaws.appintegrations#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.appintegrations#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "This API is in preview release and subject to change.
\nUpdates and persists an Application resource.
", + "smithy.api#examples": [ + { + "title": "To update an application", + "documentation": "The following updates an existing application named with a new name.", + "input": { + "Arn": "arn:aws:app-integrations:us-west-2:0123456789012:application/98542c53-e8ac-4570-9c85-c6552c8d9c5e", + "Name": "My New Application Name" + }, + "output": {} + } + ], + "smithy.api#http": { + "method": "PATCH", + "uri": "/applications/{Arn}", + "code": 200 + } + } + }, + "com.amazonaws.appintegrations#UpdateApplicationRequest": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.appintegrations#ArnOrUUID", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Application.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Name": { + "target": "com.amazonaws.appintegrations#ApplicationName", + "traits": { + "smithy.api#documentation": "The name of the application.
" + } + }, + "Description": { + "target": "com.amazonaws.appintegrations#Description", + "traits": { + "smithy.api#documentation": "The description of the application.
" + } + }, + "ApplicationSourceConfig": { + "target": "com.amazonaws.appintegrations#ApplicationSourceConfig", + "traits": { + "smithy.api#documentation": "The configuration for where the application should be loaded from.
" + } + }, + "Subscriptions": { + "target": "com.amazonaws.appintegrations#SubscriptionList", + "traits": { + "smithy.api#documentation": "The events that the application subscribes.
" + } + }, + "Publications": { + "target": "com.amazonaws.appintegrations#PublicationList", + "traits": { + "smithy.api#documentation": "The events that the application publishes.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.appintegrations#UpdateApplicationResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.appintegrations#UpdateDataIntegration": { "type": "operation", "input": { @@ -2668,7 +3430,7 @@ "Description": { "target": "com.amazonaws.appintegrations#Description", "traits": { - "smithy.api#documentation": "The description of the event inegration.
" + "smithy.api#documentation": "The description of the event integration.
" } } }, diff --git a/codegen/sdk/aws-models/application-discovery-service.json b/codegen/sdk/aws-models/application-discovery-service.json index be6ea71ed5a..854b31c8d05 100644 --- a/codegen/sdk/aws-models/application-discovery-service.json +++ b/codegen/sdk/aws-models/application-discovery-service.json @@ -969,7 +969,8 @@ } }, "traits": { - "smithy.api#documentation": "Network details about the host where the agent/collector resides.
" + "smithy.api#documentation": "Network details about the host where the agent/collector resides.
", + "smithy.api#sensitive": {} } }, "com.amazonaws.applicationdiscoveryservice#AgentNetworkInfoList": { diff --git a/codegen/sdk/aws-models/apprunner.json b/codegen/sdk/aws-models/apprunner.json index f5d51a34138..72c0c8e33e9 100644 --- a/codegen/sdk/aws-models/apprunner.json +++ b/codegen/sdk/aws-models/apprunner.json @@ -134,6 +134,9 @@ { "target": "com.amazonaws.apprunner#ListServices" }, + { + "target": "com.amazonaws.apprunner#ListServicesForAutoScalingConfiguration" + }, { "target": "com.amazonaws.apprunner#ListTagsForResource" }, @@ -158,6 +161,9 @@ { "target": "com.amazonaws.apprunner#UntagResource" }, + { + "target": "com.amazonaws.apprunner#UpdateDefaultAutoScalingConfiguration" + }, { "target": "com.amazonaws.apprunner#UpdateService" }, @@ -1023,16 +1029,14 @@ } }, "AutoScalingConfigurationRevision": { - "target": "com.amazonaws.apprunner#Integer", + "target": "com.amazonaws.apprunner#AutoScalingConfigurationRevision", "traits": { - "smithy.api#default": 0, "smithy.api#documentation": "The revision of this auto scaling configuration. It's unique among all the active configurations (\"Status\": \"ACTIVE\"
) that share the same\n AutoScalingConfigurationName
.
It's set to true
for the configuration with the highest Revision
among all configurations that share the same\n AutoScalingConfigurationName
. It's set to false
otherwise.
The maximum number of concurrent requests that an instance processes. If the number of concurrent requests exceeds this limit, App Runner scales the service\n up.
" } }, "MinSize": { - "target": "com.amazonaws.apprunner#Integer", + "target": "com.amazonaws.apprunner#MinSize", "traits": { - "smithy.api#default": 0, "smithy.api#documentation": "The minimum number of instances that App Runner provisions for a service. The service always has at least MinSize
provisioned instances. Some\n of them actively serve traffic. The rest of them (provisioned and inactive instances) are a cost-effective compute capacity reserve and are ready to be\n quickly activated. You pay for memory usage of all the provisioned instances. You pay for CPU usage of only the active subset.
App Runner temporarily doubles the number of provisioned instances during deployments, to maintain the same capacity for both old and new code.
" } }, "MaxSize": { - "target": "com.amazonaws.apprunner#Integer", + "target": "com.amazonaws.apprunner#MaxSize", "traits": { - "smithy.api#default": 0, "smithy.api#documentation": "The maximum number of instances that a service scales up to. At most MaxSize
instances actively serve traffic for your service.
The time when the auto scaling configuration was deleted. It's in Unix time stamp format.
" } + }, + "HasAssociatedService": { + "target": "com.amazonaws.apprunner#HasAssociatedService", + "traits": { + "smithy.api#documentation": "Indicates if this auto scaling configuration has an App Runner service associated with it. A value of true
indicates one or more services are\n associated. A value of false
indicates no services are associated.
Indicates if this auto scaling configuration should be used as the default for a new App Runner service that does not have an\n auto scaling configuration ARN specified during creation. Each account can have only one\n default AutoScalingConfiguration
per region. The default AutoScalingConfiguration
can be any revision under \n the same AutoScalingConfigurationName
.
The revision of this auto scaling configuration. It's unique among all the active configurations (\"Status\": \"ACTIVE\"
) with the same\n AutoScalingConfigurationName
.
The current state of the auto scaling configuration. If the status of a configuration revision is INACTIVE
, it was deleted and can't be\n used. Inactive configuration revisions are permanently removed some time after they are deleted.
The time when the auto scaling configuration was created. It's in Unix time stamp format.
" + } + }, + "HasAssociatedService": { + "target": "com.amazonaws.apprunner#HasAssociatedService", + "traits": { + "smithy.api#documentation": "Indicates if this auto scaling configuration has an App Runner service associated with it. A value of true
indicates one or more services are\n associated. A value of false
indicates no services are associated.
Indicates if this auto scaling configuration should be used as the default for a new App Runner service that does not have an\n auto scaling configuration ARN specified during creation. Each account can have only one\n default AutoScalingConfiguration
per region. The default AutoScalingConfiguration
can be any revision under \n the same AutoScalingConfigurationName
.
The command App Runner runs to build your application.
" + "smithy.api#documentation": "The command App Runner runs to build your\n application.
" } }, "StartCommand": { "target": "com.amazonaws.apprunner#StartCommand", "traits": { - "smithy.api#documentation": "The command App Runner runs to start your application.
" + "smithy.api#documentation": "The command App Runner runs to start your\n application.
" } }, "Port": { @@ -1272,7 +1312,7 @@ "RuntimeEnvironmentSecrets": { "target": "com.amazonaws.apprunner#RuntimeEnvironmentSecrets", "traits": { - "smithy.api#documentation": "An array of key-value pairs representing the secrets and parameters that get referenced to your service as an environment variable. \n The supported values are either the full Amazon Resource Name (ARN) of the Secrets Manager secret or the full ARN of the parameter in the Amazon Web Services Systems Manager Parameter Store.
\n\n If the Amazon Web Services Systems Manager Parameter Store parameter exists in the same Amazon Web Services Region as the service that you're launching, \n you can use either the full ARN or name of the secret. If the parameter exists in a different Region, then the full ARN must be specified.\n
\n\n Currently, cross account referencing of Amazon Web Services Systems Manager Parameter Store parameter is not supported.\n
\nAn array of key-value pairs representing the secrets and parameters that get referenced to your service as an environment variable. The supported\n values are either the full Amazon Resource Name (ARN) of the Secrets Manager secret or the full ARN of the parameter in the Amazon Web Services Systems Manager\n Parameter Store.
\nIf the Amazon Web Services Systems Manager Parameter Store parameter exists in the same Amazon Web Services Region as the service that you're launching, you can use\n either the full ARN or name of the secret. If the parameter exists in a different Region, then the full ARN must be specified.
\nCurrently, cross account referencing of Amazon Web Services Systems Manager Parameter Store parameter is not supported.
\nConfiguration for building and running the service from a source code repository.
\n\n CodeConfiguration
is required only for CreateService
request.
The path of the directory that stores source code and configuration files. The build and start commands also execute from here. The path is absolute\n from root and, if not specified, defaults to the repository root.
" + } } }, "traits": { @@ -1476,7 +1522,7 @@ } ], "traits": { - "smithy.api#documentation": "Create an App Runner automatic scaling configuration resource. App Runner requires this resource when you create or update App Runner services and you require\n non-default auto scaling settings. You can share an auto scaling configuration across multiple services.
\nCreate multiple revisions of a configuration by calling this action multiple times using the same AutoScalingConfigurationName
. The call\n returns incremental AutoScalingConfigurationRevision
values. When you create a service and configure an auto scaling configuration resource,\n the service uses the latest active revision of the auto scaling configuration by default. You can optionally configure the service to use a specific\n revision.
Configure a higher MinSize
to increase the spread of your App Runner service over more Availability Zones in the Amazon Web Services Region. The tradeoff is\n a higher minimal cost.
Configure a lower MaxSize
to control your cost. The tradeoff is lower responsiveness during peak demand.
Create an App Runner automatic scaling configuration resource. App Runner requires this resource when you create or update App Runner services and you require\n non-default auto scaling settings. You can share an auto scaling configuration across multiple services.
\nCreate multiple revisions of a configuration by calling this action multiple times using the same AutoScalingConfigurationName
. The call\n returns incremental AutoScalingConfigurationRevision
values. When you create a service and configure an auto scaling configuration resource,\n the service uses the latest active revision of the auto scaling configuration by default. You can optionally configure the service to use a specific\n revision.
Configure a higher MinSize
to increase the spread of your App Runner service over more Availability Zones in the Amazon Web Services Region. The\n tradeoff is a higher minimal cost.
Configure a lower MaxSize
to control your cost. The tradeoff is lower responsiveness during peak demand.
A name for the auto scaling configuration. When you use it for the first time in an Amazon Web Services Region, App Runner creates revision number 1
of this\n name. When you use the same name in subsequent calls, App Runner creates incremental revisions of the configuration.
The name DefaultConfiguration
is reserved (it's the configuration that App Runner uses if you don't provide a custome one). You can't use it\n to create a new auto scaling configuration, and you can't create a revision of it.
When you want to use your own auto scaling configuration for your App Runner service, create a configuration with a different name,\n and then provide it when you create or update your service.
\nA name for the auto scaling configuration. When you use it for the first time in an Amazon Web Services Region, App Runner creates revision number\n 1
of this name. When you use the same name in subsequent calls, App Runner creates incremental revisions of the configuration.
Prior to the release of Auto scale\n configuration enhancements, the name DefaultConfiguration
was reserved.
This restriction is no longer in place. You can now manage DefaultConfiguration
the same way you manage your custom auto scaling\n configurations. This means you can do the following with the DefaultConfiguration
that App Runner provides:
Create new revisions of the DefaultConfiguration
.
Delete the revisions of the DefaultConfiguration
.
Delete the auto scaling configuration for which the App Runner DefaultConfiguration
was created.
If you delete the auto scaling configuration you can create another custom auto scaling configuration with the same\n DefaultConfiguration
name. The original DefaultConfiguration
resource provided by App Runner remains in your account unless\n you make changes to it.
Delete an App Runner automatic scaling configuration resource. You can delete a specific revision or the latest active revision. You can't delete a\n configuration that's used by one or more App Runner services.
" + "smithy.api#documentation": "Delete an App Runner automatic scaling configuration resource. You can delete a top level auto scaling configuration, a specific revision of one, or all\n revisions associated with the top level configuration. You can't delete the default auto scaling configuration or a configuration that's used by one or\n more App Runner services.
" } }, "com.amazonaws.apprunner#DeleteAutoScalingConfigurationRequest": { @@ -2051,6 +2097,13 @@ "smithy.api#documentation": "The Amazon Resource Name (ARN) of the App Runner auto scaling configuration that you want to delete.
\nThe ARN can be a full auto scaling configuration ARN, or a partial ARN ending with either .../name\n
or\n .../name/revision\n
. If a revision isn't specified, the latest active revision is deleted.
Set to true
to delete all of the revisions associated with the AutoScalingConfigurationArn
parameter value.
When DeleteAllRevisions
is set to true
, the only valid value for the Amazon Resource Name (ARN) is a partial ARN ending\n with: .../name
.
An array of key-value pairs representing the secrets and parameters that get referenced to your service as an environment variable. \n The supported values are either the full Amazon Resource Name (ARN) of the Secrets Manager secret or the full ARN of the parameter in the Amazon Web Services Systems Manager Parameter Store.
\n\n If the Amazon Web Services Systems Manager Parameter Store parameter exists in the same Amazon Web Services Region as the service that you're launching, \n you can use either the full ARN or name of the secret. If the parameter exists in a different Region, then the full ARN must be specified.\n
\n\n Currently, cross account referencing of Amazon Web Services Systems Manager Parameter Store parameter is not supported.\n
\nAn array of key-value pairs representing the secrets and parameters that get referenced to your service as an environment variable. The supported\n values are either the full Amazon Resource Name (ARN) of the Secrets Manager secret or the full ARN of the parameter in the Amazon Web Services Systems Manager\n Parameter Store.
\nIf the Amazon Web Services Systems Manager Parameter Store parameter exists in the same Amazon Web Services Region as the service that you're launching, you can use\n either the full ARN or name of the secret. If the parameter exists in a different Region, then the full ARN must be specified.
\nCurrently, cross account referencing of Amazon Web Services Systems Manager Parameter Store parameter is not supported.
\nReturns a list of the associated App Runner services using an auto scaling configuration.
", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.apprunner#ListServicesForAutoScalingConfigurationRequest": { + "type": "structure", + "members": { + "AutoScalingConfigurationArn": { + "target": "com.amazonaws.apprunner#AppRunnerResourceArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the App Runner auto scaling configuration that you want to list the services for.
\nThe ARN can be a full auto scaling configuration ARN, or a partial ARN ending with either .../name\n
or\n .../name/revision\n
. If a revision isn't specified, the latest active revision is used.
The maximum number of results to include in each response (result page). It's used for a paginated request.
\nIf you don't specify MaxResults
, the request retrieves all available results in a single response.
A token from a previous result page. It's used for a paginated request. The request retrieves the next result page. All other parameter values must be\n identical to the ones specified in the initial request.
\nIf you don't specify NextToken
, the request retrieves the first result page.
A list of service ARN records. In a paginated request, the request returns up to MaxResults
records for each call.
The token that you can pass in a subsequent request to get the next result page. It's returned in a paginated request.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.apprunner#ListServicesRequest": { "type": "structure", "members": { @@ -3788,6 +3926,9 @@ "smithy.api#output": {} } }, + "com.amazonaws.apprunner#MaxConcurrency": { + "type": "integer" + }, "com.amazonaws.apprunner#MaxResults": { "type": "integer", "traits": { @@ -3797,6 +3938,9 @@ } } }, + "com.amazonaws.apprunner#MaxSize": { + "type": "integer" + }, "com.amazonaws.apprunner#Memory": { "type": "string", "traits": { @@ -3807,6 +3951,9 @@ "smithy.api#pattern": "^512|1024|2048|3072|4096|6144|8192|10240|12288|(0.5|1|2|3|4|6|8|10|12) GB$" } }, + "com.amazonaws.apprunner#MinSize": { + "type": "integer" + }, "com.amazonaws.apprunner#NetworkConfiguration": { "type": "structure", "members": { @@ -4448,7 +4595,7 @@ "Status": { "target": "com.amazonaws.apprunner#ServiceStatus", "traits": { - "smithy.api#documentation": "The current state of the App Runner service. These particular values mean the following.
\n\n CREATE_FAILED
– The service failed to create. The failed service isn't usable, and still counts towards your service quota. \n To troubleshoot this failure, read the failure events and logs, change any\n parameters that need to be fixed, and rebuild your service using UpdateService
.
\n DELETE_FAILED
– The service failed to delete and can't be successfully recovered. Retry the service deletion call to ensure\n that all related resources are removed.
The current state of the App Runner service. These particular values mean the following.
\n\n CREATE_FAILED
– The service failed to create. The failed service isn't usable, and still counts towards your service quota. To\n troubleshoot this failure, read the failure events and logs, change any parameters that need to be fixed, and rebuild your service using\n UpdateService
.
\n DELETE_FAILED
– The service failed to delete and can't be successfully recovered. Retry the service deletion call to ensure\n that all related resources are removed.
Describes an App Runner service. It can describe a service in any state, including deleted services.
\nThis type contains the full information about a service, including configuration details. It's returned by the CreateService, DescribeService, and DeleteService actions. A subset of this\n information is returned by the ListServices action using the ServiceSummary type.
" } }, + "com.amazonaws.apprunner#ServiceArnList": { + "type": "list", + "member": { + "target": "com.amazonaws.apprunner#AppRunnerResourceArn" + } + }, "com.amazonaws.apprunner#ServiceId": { "type": "string", "traits": { @@ -4546,7 +4699,7 @@ "ObservabilityConfigurationArn": { "target": "com.amazonaws.apprunner#AppRunnerResourceArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the observability configuration that is associated with the service. Specified only when\n ObservabilityEnabled
is true
.
Specify an ARN with a name and a revision number to associate that revision. For example:\n arn:aws:apprunner:us-east-1:123456789012:observabilityconfiguration/xray-tracing/3
\n
Specify just the name to associate the latest revision. For example:\n arn:aws:apprunner:us-east-1:123456789012:observabilityconfiguration/xray-tracing
\n
The Amazon Resource Name (ARN) of the observability configuration that is associated with the service. Specified only when\n ObservabilityEnabled
is true
.
Specify an ARN with a name and a revision number to associate that revision. For example:\n arn:aws:apprunner:us-east-1:123456789012:observabilityconfiguration/xray-tracing/3
\n
Specify just the name to associate the latest revision. For example:\n arn:aws:apprunner:us-east-1:123456789012:observabilityconfiguration/xray-tracing
\n
The current state of the App Runner service. These particular values mean the following.
\n\n CREATE_FAILED
– The service failed to create. The failed service isn't usable, and still counts towards your service quota. \n To troubleshoot this failure, read the failure events and logs, change any\n parameters that need to be fixed, and rebuild your service using UpdateService
.
\n DELETE_FAILED
– The service failed to delete and can't be successfully recovered. Retry the service deletion call to ensure\n that all related resources are removed.
The current state of the App Runner service. These particular values mean the following.
\n\n CREATE_FAILED
– The service failed to create. The failed service isn't usable, and still counts towards your service quota. To\n troubleshoot this failure, read the failure events and logs, change any parameters that need to be fixed, and rebuild your service using\n UpdateService
.
\n DELETE_FAILED
– The service failed to delete and can't be successfully recovered. Retry the service deletion call to ensure\n that all related resources are removed.
The description of a source code repository.
\nYou must provide either this member or ImageRepository
(but not both).
The description of a source code\n repository.
\nYou must provide either this member or ImageRepository
(but not both).
Describes the source deployed to an App Runner service. It can be a code or an image repository.
" } }, + "com.amazonaws.apprunner#SourceDirectory": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 4096 + }, + "smithy.api#pattern": "^[^\\x00]+$" + } + }, "com.amazonaws.apprunner#StartCommand": { "type": "string", "traits": { @@ -5010,6 +5173,59 @@ "smithy.api#output": {} } }, + "com.amazonaws.apprunner#UpdateDefaultAutoScalingConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.apprunner#UpdateDefaultAutoScalingConfigurationRequest" + }, + "output": { + "target": "com.amazonaws.apprunner#UpdateDefaultAutoScalingConfigurationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apprunner#InternalServiceErrorException" + }, + { + "target": "com.amazonaws.apprunner#InvalidRequestException" + }, + { + "target": "com.amazonaws.apprunner#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "Update an auto scaling configuration to be the default. The existing default auto scaling configuration will be set to non-default\n automatically.
" + } + }, + "com.amazonaws.apprunner#UpdateDefaultAutoScalingConfigurationRequest": { + "type": "structure", + "members": { + "AutoScalingConfigurationArn": { + "target": "com.amazonaws.apprunner#AppRunnerResourceArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the App Runner auto scaling configuration that you want to set as the default.
\nThe ARN can be a full auto scaling configuration ARN, or a partial ARN ending with either .../name\n
or\n .../name/revision\n
. If a revision isn't specified, the latest active revision is set as the\n default.
A description of the App Runner auto scaling configuration that was set as default.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.apprunner#UpdateService": { "type": "operation", "input": { diff --git a/codegen/sdk/aws-models/braket.json b/codegen/sdk/aws-models/braket.json index de2d981ab5f..cf13835111a 100644 --- a/codegen/sdk/aws-models/braket.json +++ b/codegen/sdk/aws-models/braket.json @@ -1297,6 +1297,40 @@ "smithy.api#httpError": 424 } }, + "com.amazonaws.braket#DeviceQueueInfo": { + "type": "structure", + "members": { + "queue": { + "target": "com.amazonaws.braket#QueueName", + "traits": { + "smithy.api#documentation": "The name of the queue.
", + "smithy.api#required": {} + } + }, + "queueSize": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "The number of jobs or tasks in the queue for a given device.
", + "smithy.api#required": {} + } + }, + "queuePriority": { + "target": "com.amazonaws.braket#QueuePriority", + "traits": { + "smithy.api#documentation": "Optional. Specifies the priority of the queue. Tasks in a priority queue\n are processed before the tasks in a normal queue.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Information about tasks and jobs queued on a device.
" + } + }, + "com.amazonaws.braket#DeviceQueueInfoList": { + "type": "list", + "member": { + "target": "com.amazonaws.braket#DeviceQueueInfo" + } + }, "com.amazonaws.braket#DeviceResource": { "type": "resource", "identifiers": { @@ -1504,6 +1538,12 @@ "smithy.api#documentation": "Details about the capabilities of the device.
", "smithy.api#required": {} } + }, + "deviceQueueInfo": { + "target": "com.amazonaws.braket#DeviceQueueInfoList", + "traits": { + "smithy.api#documentation": "List of information about tasks and jobs queued on a device.
" + } } } }, @@ -1551,6 +1591,13 @@ "smithy.api#httpLabel": {}, "smithy.api#required": {} } + }, + "additionalAttributeNames": { + "target": "com.amazonaws.braket#HybridJobAdditionalAttributeNamesList", + "traits": { + "smithy.api#documentation": "A list of attributes to return information for.
", + "smithy.api#httpQuery": "additionalAttributeNames" + } } } }, @@ -1686,6 +1733,12 @@ "traits": { "smithy.api#documentation": "A tag object that consists of a key and an optional value, used to manage metadata for\n Amazon Braket resources.
" } + }, + "queueInfo": { + "target": "com.amazonaws.braket#HybridJobQueueInfo", + "traits": { + "smithy.api#documentation": "Queue information for the requested job. Only returned if \n QueueInfo
is specified in the <code>additionalAttributeNames</code>
\n field in the GetJob
API request.
A list of attributes to return information for.
", + "smithy.api#httpQuery": "additionalAttributeNames" + } } } }, @@ -1820,7 +1880,58 @@ "traits": { "smithy.api#documentation": "The ARN of the Amazon Braket job associated with the quantum task.
" } + }, + "queueInfo": { + "target": "com.amazonaws.braket#QuantumTaskQueueInfo", + "traits": { + "smithy.api#documentation": "Queue information for the requested quantum task. Only returned if \n QueueInfo
is specified in the <code>additionalAttributeNames</code>
\n field in the GetQuantumTask
API request.
The name of the queue.
", + "smithy.api#required": {} + } + }, + "position": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "Current position of the job in the jobs queue.
", + "smithy.api#required": {} + } + }, + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "Optional. Provides more information about the queue position. For example,\n if the job is complete and no longer in the queue, the message field contains\n that information.
" + } } + }, + "traits": { + "smithy.api#documentation": "Information about the queue for a specified job.
" } }, "com.amazonaws.braket#HyperParameters": { @@ -2449,6 +2560,23 @@ } } }, + "com.amazonaws.braket#QuantumTaskAdditionalAttributeName": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "QueueInfo", + "name": "QUEUE_INFO" + } + ] + } + }, + "com.amazonaws.braket#QuantumTaskAdditionalAttributeNamesList": { + "type": "list", + "member": { + "target": "com.amazonaws.braket#QuantumTaskAdditionalAttributeName" + } + }, "com.amazonaws.braket#QuantumTaskArn": { "type": "string", "traits": { @@ -2457,11 +2585,45 @@ "resource": "com.amazonaws.braket#QuantumTaskResource" }, "smithy.api#length": { - "min": 1, + "min": 0, "max": 256 } } }, + "com.amazonaws.braket#QuantumTaskQueueInfo": { + "type": "structure", + "members": { + "queue": { + "target": "com.amazonaws.braket#QueueName", + "traits": { + "smithy.api#documentation": "The name of the queue.
", + "smithy.api#required": {} + } + }, + "position": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "Current position of the task in the quantum tasks queue.
", + "smithy.api#required": {} + } + }, + "queuePriority": { + "target": "com.amazonaws.braket#QueuePriority", + "traits": { + "smithy.api#documentation": "Optional. Specifies the priority of the queue. Quantum tasks in a priority queue\n are processed before the tasks in a normal queue.
" + } + }, + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "Optional. Provides more information about the queue position. For example,\n if the task is complete and no longer in the queue, the message field contains\n that information.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Information about the queue for the specified quantum task.
" + } + }, "com.amazonaws.braket#QuantumTaskResource": { "type": "resource", "identifiers": { @@ -2600,6 +2762,36 @@ "target": "com.amazonaws.braket#QuantumTaskSummary" } }, + "com.amazonaws.braket#QueueName": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "QUANTUM_TASKS_QUEUE", + "name": "QUANTUM_TASKS_QUEUE" + }, + { + "value": "JOBS_QUEUE", + "name": "JOBS_QUEUE" + } + ] + } + }, + "com.amazonaws.braket#QueuePriority": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "Normal", + "name": "NORMAL" + }, + { + "value": "Priority", + "name": "PRIORITY" + } + ] + } + }, "com.amazonaws.braket#ResourceNotFoundException": { "type": "structure", "members": { diff --git a/codegen/sdk/aws-models/chime-sdk-media-pipelines.json b/codegen/sdk/aws-models/chime-sdk-media-pipelines.json index 391e9e027c0..5c3e3513e18 100644 --- a/codegen/sdk/aws-models/chime-sdk-media-pipelines.json +++ b/codegen/sdk/aws-models/chime-sdk-media-pipelines.json @@ -520,6 +520,16 @@ "smithy.api#pattern": "^44100|48000$" } }, + "com.amazonaws.chimesdkmediapipelines#AwsRegion": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 32 + }, + "smithy.api#pattern": "^([a-z]+-){2,}\\d+$" + } + }, "com.amazonaws.chimesdkmediapipelines#BadRequestException": { "type": "structure", "members": { @@ -759,6 +769,12 @@ { "target": "com.amazonaws.chimesdkmediapipelines#CreateMediaLiveConnectorPipeline" }, + { + "target": "com.amazonaws.chimesdkmediapipelines#CreateMediaPipelineKinesisVideoStreamPool" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#CreateMediaStreamPipeline" + }, { "target": "com.amazonaws.chimesdkmediapipelines#DeleteMediaCapturePipeline" }, @@ -768,6 +784,9 @@ { "target": "com.amazonaws.chimesdkmediapipelines#DeleteMediaPipeline" }, + { + "target": "com.amazonaws.chimesdkmediapipelines#DeleteMediaPipelineKinesisVideoStreamPool" + }, { "target": "com.amazonaws.chimesdkmediapipelines#GetMediaCapturePipeline" 
}, @@ -777,6 +796,9 @@ { "target": "com.amazonaws.chimesdkmediapipelines#GetMediaPipeline" }, + { + "target": "com.amazonaws.chimesdkmediapipelines#GetMediaPipelineKinesisVideoStreamPool" + }, { "target": "com.amazonaws.chimesdkmediapipelines#GetSpeakerSearchTask" }, @@ -789,6 +811,9 @@ { "target": "com.amazonaws.chimesdkmediapipelines#ListMediaInsightsPipelineConfigurations" }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ListMediaPipelineKinesisVideoStreamPools" + }, { "target": "com.amazonaws.chimesdkmediapipelines#ListMediaPipelines" }, @@ -818,6 +843,9 @@ }, { "target": "com.amazonaws.chimesdkmediapipelines#UpdateMediaInsightsPipelineStatus" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#UpdateMediaPipelineKinesisVideoStreamPool" } ], "traits": { @@ -2365,6 +2393,190 @@ "smithy.api#output": {} } }, + "com.amazonaws.chimesdkmediapipelines#CreateMediaPipelineKinesisVideoStreamPool": { + "type": "operation", + "input": { + "target": "com.amazonaws.chimesdkmediapipelines#CreateMediaPipelineKinesisVideoStreamPoolRequest" + }, + "output": { + "target": "com.amazonaws.chimesdkmediapipelines#CreateMediaPipelineKinesisVideoStreamPoolResponse" + }, + "errors": [ + { + "target": "com.amazonaws.chimesdkmediapipelines#BadRequestException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ConflictException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ForbiddenException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ResourceLimitExceededException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ServiceFailureException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ThrottledClientException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#UnauthorizedClientException" + } + ], + "traits": { + "smithy.api#documentation": "Creates an Kinesis video stream pool for the media pipeline.
", + "smithy.api#http": { + "method": "POST", + "uri": "/media-pipeline-kinesis-video-stream-pools", + "code": 201 + } + } + }, + "com.amazonaws.chimesdkmediapipelines#CreateMediaPipelineKinesisVideoStreamPoolRequest": { + "type": "structure", + "members": { + "StreamConfiguration": { + "target": "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamConfiguration", + "traits": { + "smithy.api#documentation": "The configuration settings for the video stream.
", + "smithy.api#required": {} + } + }, + "PoolName": { + "target": "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolName", + "traits": { + "smithy.api#documentation": "The name of the video stream pool.
", + "smithy.api#required": {} + } + }, + "ClientRequestToken": { + "target": "com.amazonaws.chimesdkmediapipelines#ClientRequestToken", + "traits": { + "smithy.api#documentation": "The token assigned to the client making the request.
", + "smithy.api#idempotencyToken": {} + } + }, + "Tags": { + "target": "com.amazonaws.chimesdkmediapipelines#TagList", + "traits": { + "smithy.api#documentation": "The tags assigned to the video stream pool.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.chimesdkmediapipelines#CreateMediaPipelineKinesisVideoStreamPoolResponse": { + "type": "structure", + "members": { + "KinesisVideoStreamPoolConfiguration": { + "target": "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolConfiguration", + "traits": { + "smithy.api#documentation": "The configuration for the Kinesis video stream pool.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.chimesdkmediapipelines#CreateMediaStreamPipeline": { + "type": "operation", + "input": { + "target": "com.amazonaws.chimesdkmediapipelines#CreateMediaStreamPipelineRequest" + }, + "output": { + "target": "com.amazonaws.chimesdkmediapipelines#CreateMediaStreamPipelineResponse" + }, + "errors": [ + { + "target": "com.amazonaws.chimesdkmediapipelines#BadRequestException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ForbiddenException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#NotFoundException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ResourceLimitExceededException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ServiceFailureException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ThrottledClientException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#UnauthorizedClientException" + } + ], + "traits": { + "smithy.api#documentation": "Creates a streaming media pipeline.
", + "smithy.api#http": { + "method": "POST", + "uri": "/sdk-media-stream-pipelines", + "code": 201 + } + } + }, + "com.amazonaws.chimesdkmediapipelines#CreateMediaStreamPipelineRequest": { + "type": "structure", + "members": { + "Sources": { + "target": "com.amazonaws.chimesdkmediapipelines#MediaStreamSourceList", + "traits": { + "smithy.api#documentation": "The data sources for the media pipeline.
", + "smithy.api#required": {} + } + }, + "Sinks": { + "target": "com.amazonaws.chimesdkmediapipelines#MediaStreamSinkList", + "traits": { + "smithy.api#documentation": "The data sink for the media pipeline.
", + "smithy.api#required": {} + } + }, + "ClientRequestToken": { + "target": "com.amazonaws.chimesdkmediapipelines#ClientRequestToken", + "traits": { + "smithy.api#documentation": "The token assigned to the client making the request.
", + "smithy.api#idempotencyToken": {} + } + }, + "Tags": { + "target": "com.amazonaws.chimesdkmediapipelines#TagList", + "traits": { + "smithy.api#documentation": "The tags assigned to the media pipeline.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.chimesdkmediapipelines#CreateMediaStreamPipelineResponse": { + "type": "structure", + "members": { + "MediaStreamPipeline": { + "target": "com.amazonaws.chimesdkmediapipelines#MediaStreamPipeline", + "traits": { + "smithy.api#documentation": "The requested media pipeline.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.chimesdkmediapipelines#DataChannelConcatenationConfiguration": { "type": "structure", "members": { @@ -2380,6 +2592,22 @@ "smithy.api#documentation": "The content configuration object's data channel.
" } }, + "com.amazonaws.chimesdkmediapipelines#DataRetentionChangeInHours": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1 + } + } + }, + "com.amazonaws.chimesdkmediapipelines#DataRetentionInHours": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0 + } + } + }, "com.amazonaws.chimesdkmediapipelines#DeleteMediaCapturePipeline": { "type": "operation", "input": { @@ -2538,6 +2766,65 @@ } } }, + "com.amazonaws.chimesdkmediapipelines#DeleteMediaPipelineKinesisVideoStreamPool": { + "type": "operation", + "input": { + "target": "com.amazonaws.chimesdkmediapipelines#DeleteMediaPipelineKinesisVideoStreamPoolRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.chimesdkmediapipelines#BadRequestException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ConflictException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ForbiddenException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#NotFoundException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ServiceFailureException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ThrottledClientException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#UnauthorizedClientException" + } + ], + "traits": { + "smithy.api#documentation": "Deletes an Kinesis video stream pool.
", + "smithy.api#http": { + "method": "DELETE", + "uri": "/media-pipeline-kinesis-video-stream-pools/{Identifier}", + "code": 204 + } + } + }, + "com.amazonaws.chimesdkmediapipelines#DeleteMediaPipelineKinesisVideoStreamPoolRequest": { + "type": "structure", + "members": { + "Identifier": { + "target": "com.amazonaws.chimesdkmediapipelines#NonEmptyString", + "traits": { + "smithy.api#documentation": "The ID of the pool being deleted.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.chimesdkmediapipelines#DeleteMediaPipelineRequest": { "type": "structure", "members": { @@ -2873,6 +3160,76 @@ } } }, + "com.amazonaws.chimesdkmediapipelines#GetMediaPipelineKinesisVideoStreamPool": { + "type": "operation", + "input": { + "target": "com.amazonaws.chimesdkmediapipelines#GetMediaPipelineKinesisVideoStreamPoolRequest" + }, + "output": { + "target": "com.amazonaws.chimesdkmediapipelines#GetMediaPipelineKinesisVideoStreamPoolResponse" + }, + "errors": [ + { + "target": "com.amazonaws.chimesdkmediapipelines#BadRequestException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ForbiddenException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#NotFoundException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ServiceFailureException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ThrottledClientException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#UnauthorizedClientException" + } + ], + "traits": { + "smithy.api#documentation": "Gets an Kinesis video stream pool.
", + "smithy.api#http": { + "method": "GET", + "uri": "/media-pipeline-kinesis-video-stream-pools/{Identifier}", + "code": 200 + } + } + }, + "com.amazonaws.chimesdkmediapipelines#GetMediaPipelineKinesisVideoStreamPoolRequest": { + "type": "structure", + "members": { + "Identifier": { + "target": "com.amazonaws.chimesdkmediapipelines#NonEmptyString", + "traits": { + "smithy.api#documentation": "The ID of the video stream pool.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.chimesdkmediapipelines#GetMediaPipelineKinesisVideoStreamPoolResponse": { + "type": "structure", + "members": { + "KinesisVideoStreamPoolConfiguration": { + "target": "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolConfiguration", + "traits": { + "smithy.api#documentation": "The video stream pool configuration object.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.chimesdkmediapipelines#GetMediaPipelineRequest": { "type": "structure", "members": { @@ -3035,7 +3392,7 @@ "VoiceToneAnalysisTaskId": { "target": "com.amazonaws.chimesdkmediapipelines#GuidString", "traits": { - "smithy.api#documentation": "The ID of the voice tone anlysis task.
", + "smithy.api#documentation": "The ID of the voice tone analysis task.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -3185,7 +3542,7 @@ "TileAspectRatio": { "target": "com.amazonaws.chimesdkmediapipelines#TileAspectRatio", "traits": { - "smithy.api#documentation": "Sets the aspect ratio of the video tiles, such as 16:9.
" + "smithy.api#documentation": "Specifies the aspect ratio of all video tiles.
" } } }, @@ -3306,6 +3663,192 @@ "smithy.api#pattern": "^arn:[a-z\\d-]+:kinesisvideo:[a-z0-9-]+:[0-9]+:[a-z]+/[a-zA-Z0-9_.-]+/[0-9]+$" } }, + "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamConfiguration": { + "type": "structure", + "members": { + "Region": { + "target": "com.amazonaws.chimesdkmediapipelines#AwsRegion", + "traits": { + "smithy.api#documentation": "The Amazon Web Services Region of the video stream.
", + "smithy.api#required": {} + } + }, + "DataRetentionInHours": { + "target": "com.amazonaws.chimesdkmediapipelines#DataRetentionInHours", + "traits": { + "smithy.api#documentation": "The amount of time that data is retained.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The configuration of an Kinesis video stream.
" + } + }, + "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamConfigurationUpdate": { + "type": "structure", + "members": { + "DataRetentionInHours": { + "target": "com.amazonaws.chimesdkmediapipelines#DataRetentionChangeInHours", + "traits": { + "smithy.api#documentation": "The updated time that data is retained.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The updated Kinesis video stream configuration object.
" + } + }, + "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolConfiguration": { + "type": "structure", + "members": { + "PoolArn": { + "target": "com.amazonaws.chimesdkmediapipelines#Arn", + "traits": { + "smithy.api#documentation": "The ARN of the video stream pool configuration.
" + } + }, + "PoolName": { + "target": "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolName", + "traits": { + "smithy.api#documentation": "The name of the video stream pool configuration.
" + } + }, + "PoolId": { + "target": "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolId", + "traits": { + "smithy.api#documentation": "The ID of the video stream pool in the configuration.
" + } + }, + "PoolStatus": { + "target": "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolStatus", + "traits": { + "smithy.api#documentation": "The status of the video stream pool in the configuration.
" + } + }, + "PoolSize": { + "target": "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolSize", + "traits": { + "smithy.api#documentation": "The size of the video stream pool in the configuration.
" + } + }, + "StreamConfiguration": { + "target": "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamConfiguration", + "traits": { + "smithy.api#documentation": "The Kinesis video stream pool configuration object.
" + } + }, + "CreatedTimestamp": { + "target": "com.amazonaws.chimesdkmediapipelines#Iso8601Timestamp", + "traits": { + "smithy.api#documentation": "The time at which the configuration was created.
" + } + }, + "UpdatedTimestamp": { + "target": "com.amazonaws.chimesdkmediapipelines#Iso8601Timestamp", + "traits": { + "smithy.api#documentation": "The time at which the configuration was updated.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The video stream pool configuration object.
" + } + }, + "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[0-9a-zA-Z._-]+$" + } + }, + "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[0-9a-zA-Z._-]+$" + } + }, + "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolSize": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0 + } + } + }, + "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolStatus": { + "type": "enum", + "members": { + "CREATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATING" + } + }, + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "UPDATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UPDATING" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETING" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + } + } + }, + "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolSummary": { + "type": "structure", + "members": { + "PoolName": { + "target": "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolName", + "traits": { + "smithy.api#documentation": "The name of the video stream pool.
" + } + }, + "PoolId": { + "target": "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolId", + "traits": { + "smithy.api#documentation": "The ID of the video stream pool.
" + } + }, + "PoolArn": { + "target": "com.amazonaws.chimesdkmediapipelines#Arn", + "traits": { + "smithy.api#documentation": "The ARN of the video stream pool.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A summary of the Kinesis video stream pool.
" + } + }, + "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolSummary" + } + }, "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamRecordingSourceRuntimeConfiguration": { "type": "structure", "members": { @@ -3410,24 +3953,111 @@ "smithy.api#pattern": "^[a-zA-Z-,]+$" } }, - "com.amazonaws.chimesdkmediapipelines#LayoutOption": { - "type": "enum", + "com.amazonaws.chimesdkmediapipelines#LayoutOption": { + "type": "enum", + "members": { + "GridView": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GridView" + } + } + } + }, + "com.amazonaws.chimesdkmediapipelines#ListMediaCapturePipelines": { + "type": "operation", + "input": { + "target": "com.amazonaws.chimesdkmediapipelines#ListMediaCapturePipelinesRequest" + }, + "output": { + "target": "com.amazonaws.chimesdkmediapipelines#ListMediaCapturePipelinesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.chimesdkmediapipelines#BadRequestException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ForbiddenException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ResourceLimitExceededException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ServiceFailureException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ThrottledClientException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#UnauthorizedClientException" + } + ], + "traits": { + "smithy.api#documentation": "Returns a list of media pipelines.
", + "smithy.api#http": { + "method": "GET", + "uri": "/sdk-media-capture-pipelines", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.chimesdkmediapipelines#ListMediaCapturePipelinesRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.chimesdkmediapipelines#String", + "traits": { + "smithy.api#documentation": "The token used to retrieve the next page of results.
", + "smithy.api#httpQuery": "next-token" + } + }, + "MaxResults": { + "target": "com.amazonaws.chimesdkmediapipelines#ResultMax", + "traits": { + "smithy.api#documentation": "The maximum number of results to return in a single call. Valid Range: 1 - 99.
", + "smithy.api#httpQuery": "max-results" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.chimesdkmediapipelines#ListMediaCapturePipelinesResponse": { + "type": "structure", "members": { - "GridView": { - "target": "smithy.api#Unit", + "MediaCapturePipelines": { + "target": "com.amazonaws.chimesdkmediapipelines#MediaCapturePipelineSummaryList", "traits": { - "smithy.api#enumValue": "GridView" + "smithy.api#documentation": "The media pipeline objects in the list.
" + } + }, + "NextToken": { + "target": "com.amazonaws.chimesdkmediapipelines#String", + "traits": { + "smithy.api#documentation": "The token used to retrieve the next page of results.
" } } + }, + "traits": { + "smithy.api#output": {} } }, - "com.amazonaws.chimesdkmediapipelines#ListMediaCapturePipelines": { + "com.amazonaws.chimesdkmediapipelines#ListMediaInsightsPipelineConfigurations": { "type": "operation", "input": { - "target": "com.amazonaws.chimesdkmediapipelines#ListMediaCapturePipelinesRequest" + "target": "com.amazonaws.chimesdkmediapipelines#ListMediaInsightsPipelineConfigurationsRequest" }, "output": { - "target": "com.amazonaws.chimesdkmediapipelines#ListMediaCapturePipelinesResponse" + "target": "com.amazonaws.chimesdkmediapipelines#ListMediaInsightsPipelineConfigurationsResponse" }, "errors": [ { @@ -3453,10 +4083,10 @@ } ], "traits": { - "smithy.api#documentation": "Returns a list of media pipelines.
", + "smithy.api#documentation": "Lists the available media insights pipeline configurations.
", "smithy.api#http": { "method": "GET", - "uri": "/sdk-media-capture-pipelines", + "uri": "/media-insights-pipeline-configurations", "code": 200 }, "smithy.api#paginated": { @@ -3466,20 +4096,20 @@ } } }, - "com.amazonaws.chimesdkmediapipelines#ListMediaCapturePipelinesRequest": { + "com.amazonaws.chimesdkmediapipelines#ListMediaInsightsPipelineConfigurationsRequest": { "type": "structure", "members": { "NextToken": { "target": "com.amazonaws.chimesdkmediapipelines#String", "traits": { - "smithy.api#documentation": "The token used to retrieve the next page of results.
", + "smithy.api#documentation": "The token used to return the next page of results.
", "smithy.api#httpQuery": "next-token" } }, "MaxResults": { "target": "com.amazonaws.chimesdkmediapipelines#ResultMax", "traits": { - "smithy.api#documentation": "The maximum number of results to return in a single call. Valid Range: 1 - 99.
", + "smithy.api#documentation": "The maximum number of results to return in a single call.
", "smithy.api#httpQuery": "max-results" } } @@ -3488,19 +4118,19 @@ "smithy.api#input": {} } }, - "com.amazonaws.chimesdkmediapipelines#ListMediaCapturePipelinesResponse": { + "com.amazonaws.chimesdkmediapipelines#ListMediaInsightsPipelineConfigurationsResponse": { "type": "structure", "members": { - "MediaCapturePipelines": { - "target": "com.amazonaws.chimesdkmediapipelines#MediaCapturePipelineSummaryList", + "MediaInsightsPipelineConfigurations": { + "target": "com.amazonaws.chimesdkmediapipelines#MediaInsightsPipelineConfigurationSummaryList", "traits": { - "smithy.api#documentation": "The media pipeline objects in the list.
" + "smithy.api#documentation": "The requested list of media insights pipeline configurations.
" } }, "NextToken": { "target": "com.amazonaws.chimesdkmediapipelines#String", "traits": { - "smithy.api#documentation": "The token used to retrieve the next page of results.
" + "smithy.api#documentation": "The token used to return the next page of results.
" } } }, @@ -3508,13 +4138,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.chimesdkmediapipelines#ListMediaInsightsPipelineConfigurations": { + "com.amazonaws.chimesdkmediapipelines#ListMediaPipelineKinesisVideoStreamPools": { "type": "operation", "input": { - "target": "com.amazonaws.chimesdkmediapipelines#ListMediaInsightsPipelineConfigurationsRequest" + "target": "com.amazonaws.chimesdkmediapipelines#ListMediaPipelineKinesisVideoStreamPoolsRequest" }, "output": { - "target": "com.amazonaws.chimesdkmediapipelines#ListMediaInsightsPipelineConfigurationsResponse" + "target": "com.amazonaws.chimesdkmediapipelines#ListMediaPipelineKinesisVideoStreamPoolsResponse" }, "errors": [ { @@ -3540,10 +4170,10 @@ } ], "traits": { - "smithy.api#documentation": "Lists the available media insights pipeline configurations.
", + "smithy.api#documentation": "Lists the video stream pools in the media pipeline.
", "smithy.api#http": { "method": "GET", - "uri": "/media-insights-pipeline-configurations", + "uri": "/media-pipeline-kinesis-video-stream-pools", "code": 200 }, "smithy.api#paginated": { @@ -3553,13 +4183,13 @@ } } }, - "com.amazonaws.chimesdkmediapipelines#ListMediaInsightsPipelineConfigurationsRequest": { + "com.amazonaws.chimesdkmediapipelines#ListMediaPipelineKinesisVideoStreamPoolsRequest": { "type": "structure", "members": { "NextToken": { "target": "com.amazonaws.chimesdkmediapipelines#String", "traits": { - "smithy.api#documentation": "The token used to return the next page of results.
", + "smithy.api#documentation": "The token used to return the next page of results.
", "smithy.api#httpQuery": "next-token" } }, @@ -3575,19 +4205,19 @@ "smithy.api#input": {} } }, - "com.amazonaws.chimesdkmediapipelines#ListMediaInsightsPipelineConfigurationsResponse": { + "com.amazonaws.chimesdkmediapipelines#ListMediaPipelineKinesisVideoStreamPoolsResponse": { "type": "structure", "members": { - "MediaInsightsPipelineConfigurations": { - "target": "com.amazonaws.chimesdkmediapipelines#MediaInsightsPipelineConfigurationSummaryList", + "KinesisVideoStreamPools": { + "target": "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolSummaryList", "traits": { - "smithy.api#documentation": "The requested list of media insights pipeline configurations.
" + "smithy.api#documentation": "The list of video stream pools.
" } }, "NextToken": { "target": "com.amazonaws.chimesdkmediapipelines#String", "traits": { - "smithy.api#documentation": "The token used to return the next page of results.
" + "smithy.api#documentation": "The token used to return the next page of results.
" } } }, @@ -4477,6 +5107,12 @@ "traits": { "smithy.api#documentation": "The media insights pipeline of a media pipeline.
" } + }, + "MediaStreamPipeline": { + "target": "com.amazonaws.chimesdkmediapipelines#MediaStreamPipeline", + "traits": { + "smithy.api#documentation": "Designates a media pipeline as a media stream pipeline.
" + } } }, "traits": { @@ -4698,6 +5334,165 @@ } } }, + "com.amazonaws.chimesdkmediapipelines#MediaStreamPipeline": { + "type": "structure", + "members": { + "MediaPipelineId": { + "target": "com.amazonaws.chimesdkmediapipelines#GuidString", + "traits": { + "smithy.api#documentation": "The ID of the media stream pipeline
" + } + }, + "MediaPipelineArn": { + "target": "com.amazonaws.chimesdkmediapipelines#AmazonResourceName", + "traits": { + "smithy.api#documentation": "The ARN of the media stream pipeline.
" + } + }, + "CreatedTimestamp": { + "target": "com.amazonaws.chimesdkmediapipelines#Iso8601Timestamp", + "traits": { + "smithy.api#documentation": "The time at which the media stream pipeline was created.
" + } + }, + "UpdatedTimestamp": { + "target": "com.amazonaws.chimesdkmediapipelines#Iso8601Timestamp", + "traits": { + "smithy.api#documentation": "The time at which the media stream pipeline was updated.
" + } + }, + "Status": { + "target": "com.amazonaws.chimesdkmediapipelines#MediaPipelineStatus", + "traits": { + "smithy.api#documentation": "The status of the media stream pipeline.
" + } + }, + "Sources": { + "target": "com.amazonaws.chimesdkmediapipelines#MediaStreamSourceList", + "traits": { + "smithy.api#documentation": "The media stream pipeline's data sources.
" + } + }, + "Sinks": { + "target": "com.amazonaws.chimesdkmediapipelines#MediaStreamSinkList", + "traits": { + "smithy.api#documentation": "The media stream pipeline's data sinks.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Structure that contains the settings for a media stream pipeline.
" + } + }, + "com.amazonaws.chimesdkmediapipelines#MediaStreamPipelineSinkType": { + "type": "enum", + "members": { + "KinesisVideoStreamPool": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "KinesisVideoStreamPool" + } + } + } + }, + "com.amazonaws.chimesdkmediapipelines#MediaStreamSink": { + "type": "structure", + "members": { + "SinkArn": { + "target": "com.amazonaws.chimesdkmediapipelines#Arn", + "traits": { + "smithy.api#documentation": "The ARN of the media stream sink.
", + "smithy.api#required": {} + } + }, + "SinkType": { + "target": "com.amazonaws.chimesdkmediapipelines#MediaStreamPipelineSinkType", + "traits": { + "smithy.api#documentation": "The media stream sink's type.
", + "smithy.api#required": {} + } + }, + "ReservedStreamCapacity": { + "target": "com.amazonaws.chimesdkmediapipelines#ReservedStreamCapacity", + "traits": { + "smithy.api#documentation": "Specifies the number of streams that the sink can accept.
", + "smithy.api#required": {} + } + }, + "MediaStreamType": { + "target": "com.amazonaws.chimesdkmediapipelines#MediaStreamType", + "traits": { + "smithy.api#documentation": "The media stream sink's media stream type.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Structure that contains the settings for a media stream sink.
" + } + }, + "com.amazonaws.chimesdkmediapipelines#MediaStreamSinkList": { + "type": "list", + "member": { + "target": "com.amazonaws.chimesdkmediapipelines#MediaStreamSink" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2 + } + } + }, + "com.amazonaws.chimesdkmediapipelines#MediaStreamSource": { + "type": "structure", + "members": { + "SourceType": { + "target": "com.amazonaws.chimesdkmediapipelines#MediaPipelineSourceType", + "traits": { + "smithy.api#documentation": "The type of media stream source.
", + "smithy.api#required": {} + } + }, + "SourceArn": { + "target": "com.amazonaws.chimesdkmediapipelines#Arn", + "traits": { + "smithy.api#documentation": "The ARN of the media stream source.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Structure that contains the settings for media stream sources.
" + } + }, + "com.amazonaws.chimesdkmediapipelines#MediaStreamSourceList": { + "type": "list", + "member": { + "target": "com.amazonaws.chimesdkmediapipelines#MediaStreamSource" + }, + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.chimesdkmediapipelines#MediaStreamType": { + "type": "enum", + "members": { + "MixedAudio": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MixedAudio" + } + }, + "IndividualAudio": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IndividualAudio" + } + } + } + }, "com.amazonaws.chimesdkmediapipelines#MeetingEventsConcatenationConfiguration": { "type": "structure", "members": { @@ -5023,6 +5818,15 @@ } } }, + "com.amazonaws.chimesdkmediapipelines#ReservedStreamCapacity": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 10 + } + } + }, "com.amazonaws.chimesdkmediapipelines#ResolutionOption": { "type": "enum", "members": { @@ -6205,6 +7009,85 @@ "smithy.api#input": {} } }, + "com.amazonaws.chimesdkmediapipelines#UpdateMediaPipelineKinesisVideoStreamPool": { + "type": "operation", + "input": { + "target": "com.amazonaws.chimesdkmediapipelines#UpdateMediaPipelineKinesisVideoStreamPoolRequest" + }, + "output": { + "target": "com.amazonaws.chimesdkmediapipelines#UpdateMediaPipelineKinesisVideoStreamPoolResponse" + }, + "errors": [ + { + "target": "com.amazonaws.chimesdkmediapipelines#BadRequestException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ConflictException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ForbiddenException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#NotFoundException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ServiceFailureException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.chimesdkmediapipelines#ThrottledClientException" + }, + { + "target": 
"com.amazonaws.chimesdkmediapipelines#UnauthorizedClientException" + } + ], + "traits": { + "smithy.api#documentation": "Updates an Kinesis video stream pool in a media pipeline.
", + "smithy.api#http": { + "method": "PUT", + "uri": "/media-pipeline-kinesis-video-stream-pools/{Identifier}", + "code": 200 + } + } + }, + "com.amazonaws.chimesdkmediapipelines#UpdateMediaPipelineKinesisVideoStreamPoolRequest": { + "type": "structure", + "members": { + "Identifier": { + "target": "com.amazonaws.chimesdkmediapipelines#NonEmptyString", + "traits": { + "smithy.api#documentation": "The ID of the video stream pool.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "StreamConfiguration": { + "target": "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamConfigurationUpdate", + "traits": { + "smithy.api#documentation": "The configuration settings for the video stream.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.chimesdkmediapipelines#UpdateMediaPipelineKinesisVideoStreamPoolResponse": { + "type": "structure", + "members": { + "KinesisVideoStreamPoolConfiguration": { + "target": "com.amazonaws.chimesdkmediapipelines#KinesisVideoStreamPoolConfiguration", + "traits": { + "smithy.api#documentation": "The video stream pool configuration object.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.chimesdkmediapipelines#VerticalLayoutConfiguration": { "type": "structure", "members": { diff --git a/codegen/sdk/aws-models/cloudwatch-events.json b/codegen/sdk/aws-models/cloudwatch-events.json index e895f22720d..6c406fd137e 100644 --- a/codegen/sdk/aws-models/cloudwatch-events.json +++ b/codegen/sdk/aws-models/cloudwatch-events.json @@ -1652,6 +1652,17 @@ "smithy.api#pattern": "^[ \\t]*[^\\x00-\\x1F:\\x7F]+([ \\t]+[^\\x00-\\x1F:\\x7F]+)*[ \\t]*$" } }, + "com.amazonaws.cloudwatchevents#AuthHeaderParametersSensitive": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + }, + "smithy.api#pattern": "^[ \\t]*[^\\x00-\\x1F:\\x7F]+([ \\t]+[^\\x00-\\x1F:\\x7F]+)*[ \\t]*$", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.cloudwatchevents#AwsVpcConfiguration": { "type": "structure", "members": { @@ -2086,7 +2097,7 @@ } }, "Value": { - "target": "com.amazonaws.cloudwatchevents#String", + "target": "com.amazonaws.cloudwatchevents#SensitiveString", "traits": { "smithy.api#documentation": "The value associated with the key.
" } @@ -2135,7 +2146,7 @@ } }, "Value": { - "target": "com.amazonaws.cloudwatchevents#HeaderValue", + "target": "com.amazonaws.cloudwatchevents#HeaderValueSensitive", "traits": { "smithy.api#documentation": "The value associated with the key.
" } @@ -2279,7 +2290,7 @@ } }, "Value": { - "target": "com.amazonaws.cloudwatchevents#QueryStringValue", + "target": "com.amazonaws.cloudwatchevents#QueryStringValueSensitive", "traits": { "smithy.api#documentation": "The value associated with the key for the query string parameter.
" } @@ -2615,7 +2626,7 @@ } }, "ApiKeyValue": { - "target": "com.amazonaws.cloudwatchevents#AuthHeaderParameters", + "target": "com.amazonaws.cloudwatchevents#AuthHeaderParametersSensitive", "traits": { "smithy.api#documentation": "The value for the API key to use for authorization.
", "smithy.api#required": {} @@ -2669,7 +2680,7 @@ } }, "Password": { - "target": "com.amazonaws.cloudwatchevents#AuthHeaderParameters", + "target": "com.amazonaws.cloudwatchevents#AuthHeaderParametersSensitive", "traits": { "smithy.api#documentation": "The password associated with the user name to use for Basic authorization.
", "smithy.api#required": {} @@ -2691,7 +2702,7 @@ } }, "ClientSecret": { - "target": "com.amazonaws.cloudwatchevents#AuthHeaderParameters", + "target": "com.amazonaws.cloudwatchevents#AuthHeaderParametersSensitive", "traits": { "smithy.api#documentation": "The client secret associated with the client ID to use for OAuth authorization for the\n connection.
", "smithy.api#required": {} @@ -4543,6 +4554,17 @@ "smithy.api#pattern": "^[ \\t]*[\\x20-\\x7E]+([ \\t]+[\\x20-\\x7E]+)*[ \\t]*$" } }, + "com.amazonaws.cloudwatchevents#HeaderValueSensitive": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 512 + }, + "smithy.api#pattern": "^[ \\t]*[\\x20-\\x7E]+([ \\t]+[\\x20-\\x7E]+)*[ \\t]*$", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.cloudwatchevents#HttpParameters": { "type": "structure", "members": { @@ -6499,6 +6521,17 @@ "smithy.api#pattern": "^[^\\x00-\\x09\\x0B\\x0C\\x0E-\\x1F\\x7F]+$" } }, + "com.amazonaws.cloudwatchevents#QueryStringValueSensitive": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 512 + }, + "smithy.api#pattern": "^[^\\x00-\\x09\\x0B\\x0C\\x0E-\\x1F\\x7F]+$", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.cloudwatchevents#RedshiftDataParameters": { "type": "structure", "members": { @@ -7263,6 +7296,12 @@ "smithy.api#pattern": "^arn:aws([a-z]|\\-)*:secretsmanager:([a-z]|\\d|\\-)*:([0-9]{12})?:secret:[\\/_+=\\.@\\-A-Za-z0-9]+$" } }, + "com.amazonaws.cloudwatchevents#SensitiveString": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, "com.amazonaws.cloudwatchevents#Sql": { "type": "string", "traits": { @@ -8088,7 +8127,7 @@ } }, "ApiKeyValue": { - "target": "com.amazonaws.cloudwatchevents#AuthHeaderParameters", + "target": "com.amazonaws.cloudwatchevents#AuthHeaderParametersSensitive", "traits": { "smithy.api#documentation": "The value associated with teh API key to use for authorization.
" } @@ -8140,7 +8179,7 @@ } }, "Password": { - "target": "com.amazonaws.cloudwatchevents#AuthHeaderParameters", + "target": "com.amazonaws.cloudwatchevents#AuthHeaderParametersSensitive", "traits": { "smithy.api#documentation": "The password associated with the user name to use for Basic authorization.
" } @@ -8160,7 +8199,7 @@ } }, "ClientSecret": { - "target": "com.amazonaws.cloudwatchevents#AuthHeaderParameters", + "target": "com.amazonaws.cloudwatchevents#AuthHeaderParametersSensitive", "traits": { "smithy.api#documentation": "The client secret assciated with the client ID to use for OAuth authorization.
" } diff --git a/codegen/sdk/aws-models/cloudwatch-logs.json b/codegen/sdk/aws-models/cloudwatch-logs.json index a3f0e0d634f..e7515407cdb 100644 --- a/codegen/sdk/aws-models/cloudwatch-logs.json +++ b/codegen/sdk/aws-models/cloudwatch-logs.json @@ -219,6 +219,16 @@ "smithy.api#input": {} } }, + "com.amazonaws.cloudwatchlogs#ClientToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 36, + "max": 128 + }, + "smithy.api#pattern": "^\\S{36,128}$" + } + }, "com.amazonaws.cloudwatchlogs#CreateExportTask": { "type": "operation", "input": { @@ -345,7 +355,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a log group with the specified name. You can create up to 20,000 log groups per account.
\nYou must use the following guidelines when naming a log group:
\nLog group names must be unique within a Region for an Amazon Web Services\n account.
\nLog group names can be between 1 and 512 characters long.
\nLog group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), \n '/' (forward slash), '.' (period), and '#' (number sign)
\nWhen you create a log group, by default the log events in the log group do not expire.\n To set a retention policy so that events expire and are deleted after a specified time, use\n PutRetentionPolicy.
\nIf you associate an KMS key with the log group, ingested data is\n encrypted using the KMS key. This association is stored as long as the data\n encrypted with the KMS key is still within CloudWatch Logs. This enables\n CloudWatch Logs to decrypt this data whenever it is requested.
\nIf you attempt to associate a KMS key with the log group but the KMS key does not exist or the KMS key is disabled, you receive an\n InvalidParameterException
error.
CloudWatch Logs supports only symmetric KMS keys. Do not associate an\n asymmetric KMS key with your log group. For more information, see Using\n Symmetric and Asymmetric Keys.
\nCreates a log group with the specified name. You can create up to 1,000,000 log groups per Region per account.
\nYou must use the following guidelines when naming a log group:
\nLog group names must be unique within a Region for an Amazon Web Services\n account.
\nLog group names can be between 1 and 512 characters long.
\nLog group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), \n '/' (forward slash), '.' (period), and '#' (number sign)
\nWhen you create a log group, by default the log events in the log group do not expire.\n To set a retention policy so that events expire and are deleted after a specified time, use\n PutRetentionPolicy.
\nIf you associate an KMS key with the log group, ingested data is\n encrypted using the KMS key. This association is stored as long as the data\n encrypted with the KMS key is still within CloudWatch Logs. This enables\n CloudWatch Logs to decrypt this data whenever it is requested.
\nIf you attempt to associate a KMS key with the log group but the KMS key does not exist or the KMS key is disabled, you receive an\n InvalidParameterException
error.
CloudWatch Logs supports only symmetric KMS keys. Do not associate an\n asymmetric KMS key with your log group. For more information, see Using\n Symmetric and Asymmetric Keys.
\nCreates or updates a metric filter and associates it with the specified log group. With\n metric filters, you can configure rules to extract metric data from log events ingested\n through PutLogEvents.
\nThe maximum number of metric filters that can be associated with a log group is\n 100.
\nWhen you create a metric filter, you can also optionally assign a unit and dimensions\n to the metric that is created.
\nMetrics extracted from log events are charged as custom metrics.\n To prevent unexpected high charges, do not specify high-cardinality fields such as \n IPAddress
or requestID
as dimensions. Each different value \n found for \n a dimension is treated as a separate metric and accrues charges as a separate custom metric.\n
CloudWatch Logs disables a metric filter if it generates 1,000 different name/value pairs for\n your specified dimensions within a certain amount of time. This helps to prevent accidental\n high charges.
\nYou can also set up a billing alarm to alert you if your charges are higher than \n expected. For more information, \n see \n Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.\n
\nCreates or updates a metric filter and associates it with the specified log group. With\n metric filters, you can configure rules to extract metric data from log events ingested\n through PutLogEvents.
\nThe maximum number of metric filters that can be associated with a log group is\n 100.
\nWhen you create a metric filter, you can also optionally assign a unit and dimensions\n to the metric that is created.
\nMetrics extracted from log events are charged as custom metrics.\n To prevent unexpected high charges, do not specify high-cardinality fields such as \n IPAddress
or requestID
as dimensions. Each different value \n found for \n a dimension is treated as a separate metric and accrues charges as a separate custom metric.\n
CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for\n your specified dimensions within one hour.
\nYou can also set up a billing alarm to alert you if your charges are higher than \n expected. For more information, \n see \n Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.\n
\nThe query string to use for this definition. \n For more information, see CloudWatch Logs Insights Query Syntax.
", "smithy.api#required": {} } + }, + "clientToken": { + "target": "com.amazonaws.cloudwatchlogs#ClientToken", + "traits": { + "smithy.api#documentation": "Used as an idempotency token, to avoid returning an exception if the service receives the same request twice because of a network\n error.
", + "smithy.api#idempotencyToken": {} + } } }, "traits": { @@ -5202,7 +5219,7 @@ } ], "traits": { - "smithy.api#documentation": "Sets the retention of the specified log group. With a retention policy, you can\n configure the number of days for which to retain log events in the specified log\n group.
\nCloudWatch Logs doesn’t immediately delete log events when they reach their retention\n setting. It typically takes up to 72 hours after that before log events are deleted, but in\n rare situations might take longer.
\nTo illustrate, imagine that you change a log group to have a longer retention setting\n when it contains log events that are past the expiration date, but haven’t been deleted.\n Those log events will take up to 72 hours to be deleted after the new retention date is\n reached. To make sure that log data is deleted permanently, keep a log group at its lower\n retention setting until 72 hours after the previous retention period ends. Alternatively,\n wait to change the retention setting until you confirm that the earlier log events are\n deleted.
\nSets the retention of the specified log group. With a retention policy, you can\n configure the number of days for which to retain log events in the specified log\n group.
\nCloudWatch Logs doesn’t immediately delete log events when they reach their retention\n setting. It typically takes up to 72 hours after that before log events are deleted, but in\n rare situations might take longer.
\nTo illustrate, imagine that you change a log group to have a longer retention setting\n when it contains log events that are past the expiration date, but haven’t been deleted.\n Those log events will take up to 72 hours to be deleted after the new retention date is\n reached. To make sure that log data is deleted permanently, keep a log group at its lower\n retention setting until 72 hours after the previous retention period ends. Alternatively,\n wait to change the retention setting until you confirm that the earlier log events are\n deleted.
\nWhen log events reach their retention setting they are marked for deletion. After\n they are marked for deletion, they do not add to your archival storage costs anymore, even if \n they are not actually deleted until later. These log events marked for deletion are also not \n included when you use an API to retrieve the storedBytes
value to see how many bytes a log group is storing.
An application with the specified name with the IAM user or Amazon Web Services account already exists.
", + "smithy.api#documentation": "An application with the specified name with the user or Amazon Web Services account\n already exists.
", "smithy.api#error": "client" } }, @@ -207,7 +207,7 @@ } }, "traits": { - "smithy.api#documentation": "The application does not exist with the IAM user or Amazon Web Services account.
", + "smithy.api#documentation": "The application does not exist with the user or Amazon Web Services account.
", "smithy.api#error": "client" } }, @@ -625,7 +625,7 @@ "applicationName": { "target": "com.amazonaws.codedeploy#ApplicationName", "traits": { - "smithy.api#documentation": "The name of an CodeDeploy application associated with the applicable\n IAM or Amazon Web Services account.
", + "smithy.api#documentation": "The name of an CodeDeploy application associated with the applicable user\n or Amazon Web Services account.
", "smithy.api#required": {} } }, @@ -2379,7 +2379,7 @@ "applicationName": { "target": "com.amazonaws.codedeploy#ApplicationName", "traits": { - "smithy.api#documentation": "The name of the application. This name must be unique with the applicable IAM or Amazon Web Services account.
", + "smithy.api#documentation": "The name of the application. This name must be unique with the applicable user or\n Amazon Web Services account.
", "smithy.api#required": {} } }, @@ -2711,7 +2711,7 @@ "applicationName": { "target": "com.amazonaws.codedeploy#ApplicationName", "traits": { - "smithy.api#documentation": "The name of an CodeDeploy application associated with the IAM user or Amazon Web Services account.
", + "smithy.api#documentation": "The name of an CodeDeploy application associated with the user or Amazon Web Services account.
", "smithy.api#required": {} } }, @@ -2846,7 +2846,7 @@ "applicationName": { "target": "com.amazonaws.codedeploy#ApplicationName", "traits": { - "smithy.api#documentation": "The name of an CodeDeploy application associated with the IAM user or Amazon Web Services account.
", + "smithy.api#documentation": "The name of an CodeDeploy application associated with the user or Amazon Web Services account.
", "smithy.api#required": {} } }, @@ -2865,7 +2865,7 @@ "deploymentConfigName": { "target": "com.amazonaws.codedeploy#DeploymentConfigName", "traits": { - "smithy.api#documentation": "The name of a deployment configuration associated with the IAM user or\n Amazon Web Services account.
\nIf not specified, the value configured in the deployment group is used as the default.\n If the deployment group does not have a deployment configuration associated with it,\n CodeDeployDefault
.OneAtATime
is used by default.
The name of a deployment configuration associated with the user or Amazon Web Services account.
\nIf not specified, the value configured in the deployment group is used as the default.\n If the deployment group does not have a deployment configuration associated with it,\n CodeDeployDefault
.OneAtATime
is used by default.
Allows you to specify information about alarms associated with a deployment. The alarm\n configuration that you specify here will override the alarm configuration at the\n deployment group level. Consider overriding the alarm configuration if you have set up\n alarms at the deployment group level that are causing deployment failures. In this case,\n you would call CreateDeployment
to create a new deployment that uses a\n previous application revision that is known to work, and set its alarm configuration to\n turn off alarm polling. Turning off alarm polling ensures that the new deployment\n proceeds without being blocked by the alarm that was generated by the previous, failed,\n deployment.
If you specify an overrideAlarmConfiguration
, you need the\n UpdateDeploymentGroup
IAM permission when calling\n CreateDeployment
.
Allows you to specify information about alarms associated with a deployment. The alarm\n configuration that you specify here will override the alarm configuration at the\n deployment group level. Consider overriding the alarm configuration if you have set up\n alarms at the deployment group level that are causing deployment failures. In this case,\n you would call CreateDeployment
to create a new deployment that uses a\n previous application revision that is known to work, and set its alarm configuration to\n turn off alarm polling. Turning off alarm polling ensures that the new deployment\n proceeds without being blocked by the alarm that was generated by the previous, failed,\n deployment.
If you specify an overrideAlarmConfiguration
, you need the\n UpdateDeploymentGroup
\n IAM permission when calling CreateDeployment
.
The name of an CodeDeploy application associated with the IAM user or Amazon Web Services account.
", + "smithy.api#documentation": "The name of an CodeDeploy application associated with the user or Amazon Web Services account.
", "smithy.api#required": {} } } @@ -3004,7 +3004,7 @@ "deploymentConfigName": { "target": "com.amazonaws.codedeploy#DeploymentConfigName", "traits": { - "smithy.api#documentation": "The name of a deployment configuration associated with the IAM user or\n Amazon Web Services account.
", + "smithy.api#documentation": "The name of a deployment configuration associated with the user or Amazon Web Services account.
", "smithy.api#required": {} } } @@ -3049,7 +3049,7 @@ "applicationName": { "target": "com.amazonaws.codedeploy#ApplicationName", "traits": { - "smithy.api#documentation": "The name of an CodeDeploy application associated with the IAM user or Amazon Web Services account.
", + "smithy.api#documentation": "The name of an CodeDeploy application associated with the user or Amazon Web Services account.
", "smithy.api#required": {} } }, @@ -3149,7 +3149,7 @@ "target": "com.amazonaws.codedeploy#DeleteResourcesByExternalIdOutput" }, "traits": { - "smithy.api#documentation": "Deletes resources linked to an external ID.
" + "smithy.api#documentation": "Deletes resources linked to an external ID. This action only applies if you have\n configured blue/green deployments through CloudFormation.
\nIt is not necessary to call this action directly. CloudFormation calls it\n on your behalf when it needs to delete stack resources. This action is offered\n publicly in case you need to delete resources to comply with General Data Protection\n Regulation (GDPR) requirements.
\nA deployment configuration with the specified name with the IAM user or\n Amazon Web Services account already exists.
", + "smithy.api#documentation": "A deployment configuration with the specified name with the user or Amazon Web Services account already exists.
", "smithy.api#error": "client" } }, @@ -3214,7 +3214,7 @@ } }, "traits": { - "smithy.api#documentation": "The deployment configuration does not exist with the IAM user or\n Amazon Web Services account.
", + "smithy.api#documentation": "The deployment configuration does not exist with the user or Amazon Web Services account.
", "smithy.api#error": "client" } }, @@ -3383,7 +3383,7 @@ } }, "traits": { - "smithy.api#documentation": "The deployment with the IAM user or Amazon Web Services account does not\n exist.
", + "smithy.api#documentation": "The deployment with the user or Amazon Web Services account does not exist.
", "smithy.api#error": "client" } }, @@ -3398,7 +3398,7 @@ } }, "traits": { - "smithy.api#documentation": "A deployment group with the specified name with the IAM user or Amazon Web Services account already exists.
", + "smithy.api#documentation": "A deployment group with the specified name with the user or Amazon Web Services account\n already exists.
", "smithy.api#error": "client" } }, @@ -3413,7 +3413,7 @@ } }, "traits": { - "smithy.api#documentation": "The named deployment group with the IAM user or Amazon Web Services account does not exist.
", + "smithy.api#documentation": "The named deployment group with the user or Amazon Web Services account does not\n exist.
", "smithy.api#error": "client" } }, @@ -4558,12 +4558,12 @@ "name": { "target": "com.amazonaws.codedeploy#ELBName", "traits": { - "smithy.api#documentation": "For blue/green deployments, the name of the load balancer that is used to route\n traffic from original instances to replacement instances in a blue/green deployment. For\n in-place deployments, the name of the load balancer that instances are deregistered from\n so they are not serving traffic during a deployment, and then re-registered with after\n the deployment is complete.
" + "smithy.api#documentation": "For blue/green deployments, the name of the Classic Load Balancer that is used to route\n traffic from original instances to replacement instances in a blue/green deployment. For\n in-place deployments, the name of the Classic Load Balancer that instances are deregistered from\n so they are not serving traffic during a deployment, and then re-registered with after\n the deployment is complete.
" } } }, "traits": { - "smithy.api#documentation": "Information about a load balancer in Elastic Load Balancing to use in a deployment.\n Instances are registered directly with a load balancer, and traffic is routed to the\n load balancer.
" + "smithy.api#documentation": "Information about a Classic Load Balancer in Elastic Load Balancing to use in a deployment.\n Instances are registered directly with a load balancer, and traffic is routed to the\n load balancer.
" } }, "com.amazonaws.codedeploy#ELBInfoList": { @@ -4912,7 +4912,7 @@ "applicationName": { "target": "com.amazonaws.codedeploy#ApplicationName", "traits": { - "smithy.api#documentation": "The name of an CodeDeploy application associated with the IAM user or Amazon Web Services account.
", + "smithy.api#documentation": "The name of an CodeDeploy application associated with the user or Amazon Web Services account.
", "smithy.api#required": {} } } @@ -5111,7 +5111,7 @@ "deploymentConfigName": { "target": "com.amazonaws.codedeploy#DeploymentConfigName", "traits": { - "smithy.api#documentation": "The name of a deployment configuration associated with the IAM user or\n Amazon Web Services account.
", + "smithy.api#documentation": "The name of a deployment configuration associated with the user or Amazon Web Services account.
", "smithy.api#required": {} } } @@ -5177,7 +5177,7 @@ "applicationName": { "target": "com.amazonaws.codedeploy#ApplicationName", "traits": { - "smithy.api#documentation": "The name of an CodeDeploy application associated with the IAM user or Amazon Web Services account.
", + "smithy.api#documentation": "The name of an CodeDeploy application associated with the user or Amazon Web Services account.
", "smithy.api#required": {} } }, @@ -5215,7 +5215,7 @@ "deploymentId": { "target": "com.amazonaws.codedeploy#DeploymentId", "traits": { - "smithy.api#documentation": "The unique ID of a deployment associated with the IAM user or Amazon Web Services account.
", + "smithy.api#documentation": "The unique ID of a deployment associated with the user or Amazon Web Services account.\n
", "smithy.api#required": {} } } @@ -5543,7 +5543,7 @@ } }, "traits": { - "smithy.api#documentation": "No IAM ARN was included in the request. You must use an IAM session ARN or IAM user ARN in the request.
", + "smithy.api#documentation": "No IAM ARN was included in the request. You must use an IAM session ARN or user ARN in the request.
", "smithy.api#error": "client" } }, @@ -5579,7 +5579,7 @@ } }, "traits": { - "smithy.api#documentation": "The specified IAM user ARN is already registered with an on-premises\n instance.
", + "smithy.api#documentation": "The specified user ARN is already registered with an on-premises instance.
", "smithy.api#error": "client" } }, @@ -5594,7 +5594,7 @@ } }, "traits": { - "smithy.api#documentation": "An IAM user ARN was not specified.
", + "smithy.api#documentation": "An user ARN was not specified.
", "smithy.api#error": "client" } }, @@ -5675,13 +5675,13 @@ "iamSessionArn": { "target": "com.amazonaws.codedeploy#IamSessionArn", "traits": { - "smithy.api#documentation": "The ARN of the IAM session associated with the on-premises instance.
" + "smithy.api#documentation": "The ARN of the IAM session associated with the on-premises\n instance.
" } }, "iamUserArn": { "target": "com.amazonaws.codedeploy#IamUserArn", "traits": { - "smithy.api#documentation": "The IAM user ARN associated with the on-premises instance.
" + "smithy.api#documentation": "The user ARN associated with the on-premises instance.
" } }, "instanceArn": { @@ -6364,7 +6364,7 @@ } }, "traits": { - "smithy.api#documentation": "The IAM user ARN was specified in an invalid format.
", + "smithy.api#documentation": "The user ARN was specified in an invalid format.
", "smithy.api#error": "client" } }, @@ -7138,7 +7138,7 @@ "applicationName": { "target": "com.amazonaws.codedeploy#ApplicationName", "traits": { - "smithy.api#documentation": "The name of an CodeDeploy application associated with the IAM user or Amazon Web Services account.
", + "smithy.api#documentation": "The name of an CodeDeploy application associated with the user or Amazon Web Services account.
", "smithy.api#required": {} } }, @@ -7219,7 +7219,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the applications registered with the IAM user or Amazon Web Services account.
", + "smithy.api#documentation": "Lists the applications registered with the user or Amazon Web Services account.
", "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -7277,7 +7277,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the deployment configurations with the IAM user or Amazon Web Services account.
", + "smithy.api#documentation": "Lists the deployment configurations with the user or Amazon Web Services account.
", "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -7344,7 +7344,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the deployment groups for an application registered with the IAM\n user or Amazon Web Services account.
", + "smithy.api#documentation": "Lists the deployment groups for an application registered with the Amazon Web Services\n user or Amazon Web Services account.
", "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -7358,7 +7358,7 @@ "applicationName": { "target": "com.amazonaws.codedeploy#ApplicationName", "traits": { - "smithy.api#documentation": "The name of an CodeDeploy application associated with the IAM user or Amazon Web Services account.
", + "smithy.api#documentation": "The name of an CodeDeploy application associated with the user or Amazon Web Services account.
", "smithy.api#required": {} } }, @@ -7445,7 +7445,7 @@ "smithy.api#deprecated": { "message": "This operation is deprecated, use ListDeploymentTargets instead." }, - "smithy.api#documentation": " The newer BatchGetDeploymentTargets
should be used instead because\n it works with all compute types. ListDeploymentInstances
throws an\n exception if it is used with a compute platform other than EC2/On-premises or\n Lambda.
Lists the instance for a deployment associated with the IAM user or\n Amazon Web Services account.
", + "smithy.api#documentation": " The newer BatchGetDeploymentTargets
should be used instead because\n it works with all compute types. ListDeploymentInstances
throws an\n exception if it is used with a compute platform other than EC2/On-premises or\n Lambda.
Lists the instance for a deployment associated with the user or Amazon Web Services account.
", "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -7636,7 +7636,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the deployments in a deployment group for an application registered with the\n IAM user or Amazon Web Services account.
", + "smithy.api#documentation": "Lists the deployments in a deployment group for an application registered with the\n user or Amazon Web Services account.
", "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -7650,7 +7650,7 @@ "applicationName": { "target": "com.amazonaws.codedeploy#ApplicationName", "traits": { - "smithy.api#documentation": "The name of an CodeDeploy application associated with the IAM user or Amazon Web Services account.
\nIf applicationName
is specified, then\n deploymentGroupName
must be specified. If it is not specified, then\n deploymentGroupName
must not be specified.
The name of an CodeDeploy application associated with the user or Amazon Web Services account.
\nIf applicationName
is specified, then\n deploymentGroupName
must be specified. If it is not specified, then\n deploymentGroupName
must not be specified.
An array that contains information about the load balancer to use for load balancing\n in a deployment. In Elastic Load Balancing, load balancers are used with Classic Load\n Balancers.
\nAdding more than one load balancer to the array is not supported.
\nAn array that contains information about the load balancers to use for load balancing\n in a deployment. If you're using Classic Load Balancers, specify those load balancers\n in this array.
\nYou can add up to 10 load balancers to the array.
\nIf you're using Application Load Balancers or Network Load Balancers, use the\n targetGroupInfoList
array instead of this one.
An array that contains information about the target group to use for load balancing in\n a deployment. In Elastic Load Balancing, target groups are used with Application Load\n Balancers.
\nAdding more than one target group to the array is not supported.
\nAn array that contains information about the target groups to use for load balancing\n in a deployment. If you're using Application Load Balancers and Network Load Balancers,\n specify their associated target groups in this array.
\nYou can add up to 10 target groups to the array.
\nIf you're using Classic Load Balancers, use the elbInfoList
array\n instead of this one.
Information about the Elastic Load Balancing load balancer or target group used in a\n deployment.
" + "smithy.api#documentation": "Information about the Elastic Load Balancing load balancer or target group used in a\n deployment.
\nYou can use load balancers and target groups in combination. For example, if you have\n two Classic Load Balancers, and five target groups tied to an Application Load Balancer,\n you can specify the two Classic Load Balancers in elbInfoList
, and the five\n target groups in targetGroupInfoList
.
Both an IAM user ARN and an IAM session ARN were\n included in the request. Use only one ARN type.
", + "smithy.api#documentation": "Both an user ARN and an IAM session ARN were included in the request.\n Use only one ARN type.
", "smithy.api#error": "client" } }, @@ -8233,7 +8233,7 @@ "applicationName": { "target": "com.amazonaws.codedeploy#ApplicationName", "traits": { - "smithy.api#documentation": "The name of an CodeDeploy application associated with the IAM user or Amazon Web Services account.
", + "smithy.api#documentation": "The name of an CodeDeploy application associated with the user or Amazon Web Services account.
", "smithy.api#required": {} } }, @@ -8319,7 +8319,7 @@ "iamUserArn": { "target": "com.amazonaws.codedeploy#IamUserArn", "traits": { - "smithy.api#documentation": "The ARN of the IAM user to associate with the on-premises\n instance.
" + "smithy.api#documentation": "The ARN of the user to associate with the on-premises instance.
" } } }, @@ -8467,7 +8467,7 @@ } }, "traits": { - "smithy.api#documentation": "The named revision does not exist with the IAM user or Amazon Web Services account.
", + "smithy.api#documentation": "The named revision does not exist with the user or Amazon Web Services account.
", "smithy.api#error": "client" } }, @@ -8653,7 +8653,7 @@ "bundleType": { "target": "com.amazonaws.codedeploy#BundleType", "traits": { - "smithy.api#documentation": "The file type of the application revision. Must be one of the following:
\n\n tar
: A tar archive file.
\n tgz
: A compressed tar archive file.
\n zip
: A zip archive file.
The file type of the application revision. Must be one of the following:
\n\n tar
: A tar archive file.
\n tgz
: A compressed tar archive file.
\n zip
: A zip archive file.
\n YAML
: A YAML-formatted file.
\n JSON
: A JSON-formatted file.
The replacement list of Auto Scaling groups to be included in the deployment\n group, if you want to change them.
\nTo keep the Auto Scaling groups, enter their names or do not specify this\n parameter.
\nTo remove Auto Scaling groups, specify a non-null empty list of Auto Scaling group names to detach all CodeDeploy-managed Auto Scaling lifecycle hooks. For examples, see Amazon EC2 instances in an Amazon EC2 Auto Scaling group fail to\n launch and receive the error \"Heartbeat Timeout\" in the\n CodeDeploy User Guide.
\nThe replacement list of Auto Scaling groups to be included in the deployment\n group, if you want to change them.
\nTo keep the Auto Scaling groups, enter their names or do not specify this\n parameter.
\nTo remove Auto Scaling groups, specify a non-null empty list of Auto Scaling group names to detach all CodeDeploy-managed Auto Scaling lifecycle hooks. For examples, see Amazon EC2 instances in an Amazon EC2 Auto Scaling group fail to\n launch and receive the error \"Heartbeat Timeout\" in the\n CodeDeploy User Guide.
\nThe username for the user. Must be unique within the user pool. Must be a UTF-8 string\n between 1 and 128 characters. After the user is created, the username can't be\n changed.
", + "smithy.api#documentation": "The value that you want to set as the username sign-in attribute. The following\n conditions apply to the username parameter.
\nThe username can't be a duplicate of another username in the same user\n pool.
\nYou can't change the value of a username after you create it.
\nYou can only provide a value if usernames are a valid sign-in attribute for\n your user pool. If your user pool only supports phone numbers or email addresses\n as sign-in attributes, Amazon Cognito automatically generates a username value. For more\n information, see Customizing sign-in attributes.
\nThe Amazon Resource Name (arn) of a CloudWatch Logs log group where your user pool sends logs.\n The log group must not be encrypted with Key Management Service and must be in the same Amazon Web Services account\n as your user pool.
\nTo send logs to log groups with a resource policy of a size greater than 5120\n characters, configure a log group with a path that starts with\n /aws/vendedlogs
. For more information, see Enabling\n logging from certain Amazon Web Services services.
The Amazon Resource Name (arn) of a CloudWatch Logs log group where your user pool sends logs.\n The log group must not be encrypted with Key Management Service and must be in the same Amazon Web Services account\n as your user pool.
\nTo send logs to log groups with a resource policy of a size greater than 5120\n characters, configure a log group with a path that starts with\n /aws/vendedlogs
. For more information, see Enabling\n logging from certain Amazon Web Services services.
The user pool status in a user pool description.
" } }, @@ -15439,6 +15442,9 @@ "Status": { "target": "com.amazonaws.cognitoidentityprovider#StatusType", "traits": { + "smithy.api#deprecated": { + "message": "This property is no longer available." + }, "smithy.api#documentation": "The status of a user pool.
" } }, diff --git a/codegen/sdk/aws-models/connect.json b/codegen/sdk/aws-models/connect.json index 498409a49ea..6285c163d88 100644 --- a/codegen/sdk/aws-models/connect.json +++ b/codegen/sdk/aws-models/connect.json @@ -978,6 +978,9 @@ { "target": "com.amazonaws.connect#ListSecurityKeys" }, + { + "target": "com.amazonaws.connect#ListSecurityProfileApplications" + }, { "target": "com.amazonaws.connect#ListSecurityProfilePermissions" }, @@ -2059,6 +2062,50 @@ "smithy.api#documentation": "Configuration of the answering machine detection.
" } }, + "com.amazonaws.connect#Application": { + "type": "structure", + "members": { + "Namespace": { + "target": "com.amazonaws.connect#Namespace", + "traits": { + "smithy.api#documentation": "Namespace of the application that you want to give access to.
" + } + }, + "ApplicationPermissions": { + "target": "com.amazonaws.connect#ApplicationPermissions", + "traits": { + "smithy.api#documentation": "The permissions that the agent is granted on the application. Only the ACCESS
permission is supported.
This API is in preview release for Amazon Connect and is subject to change.
\nA third party application's metadata.
" + } + }, + "com.amazonaws.connect#ApplicationPermissions": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#Permission" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.connect#Applications": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#Application" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, "com.amazonaws.connect#ApproximateTotalCount": { "type": "long" }, @@ -5184,7 +5231,7 @@ "Name": { "target": "com.amazonaws.connect#QuickConnectName", "traits": { - "smithy.api#documentation": "The name of the quick connect.
", + "smithy.api#documentation": "A unique name of the quick connect.
", "smithy.api#required": {} } }, @@ -5571,6 +5618,12 @@ "traits": { "smithy.api#documentation": "The list of resources that a security profile applies tag restrictions to in Amazon Connect. Following are acceptable ResourceNames: User
|\n SecurityProfile
| Queue
| RoutingProfile
\n
This API is in preview release for Amazon Connect and is subject to change.
\nA list of third party applications that the security profile will give access to.
" + } } }, "traits": { @@ -12142,6 +12195,12 @@ "traits": { "smithy.api#enumValue": "OnContactEvaluationSubmit" } + }, + "OnMetricDataUpdate": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OnMetricDataUpdate" + } } } }, @@ -14621,6 +14680,12 @@ "traits": { "smithy.api#enumValue": "CASES_DOMAIN" } + }, + "APPLICATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "APPLICATION" + } } } }, @@ -16517,7 +16582,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides information about the phone numbers for the specified Amazon Connect instance.
\nFor more information about phone numbers, see Set Up Phone Numbers for Your\n Contact Center in the Amazon Connect Administrator\n Guide.
\nWe recommend using ListPhoneNumbersV2 to\n return phone number types. ListPhoneNumbers doesn't support number types UIFN
,\n SHARED
, THIRD_PARTY_TF
, and THIRD_PARTY_DID
. While it returns\n numbers of those types, it incorrectly lists them as TOLL_FREE
or DID
.
The phone number Arn
value that is returned from each of the items in the\n PhoneNumberSummaryList cannot be used to tag phone number resources. It will fail\n with a ResourceNotFoundException
. Instead, use the ListPhoneNumbersV2 API.\n It returns the new phone number ARN that can be used to tag phone number resources.
Provides information about the phone numbers for the specified Amazon Connect instance.
\nFor more information about phone numbers, see Set Up Phone Numbers for Your\n Contact Center in the Amazon Connect Administrator\n Guide.
\nWe recommend using ListPhoneNumbersV2 to\n return phone number types. ListPhoneNumbers doesn't support number types UIFN
,\n SHARED
, THIRD_PARTY_TF
, and THIRD_PARTY_DID
. While it\n returns numbers of those types, it incorrectly lists them as TOLL_FREE
or\n DID
.
The phone number Arn
value that is returned from each of the items in the\n PhoneNumberSummaryList cannot be used to tag phone number resources. It will fail\n with a ResourceNotFoundException
. Instead, use the ListPhoneNumbersV2 API.\n It returns the new phone number ARN that can be used to tag phone number resources.
The type of phone number.
", + "smithy.api#documentation": "The type of phone number.
\nWe recommend using ListPhoneNumbersV2 to\n return phone number types. While ListPhoneNumbers returns number types UIFN
,\n SHARED
, THIRD_PARTY_TF
, and THIRD_PARTY_DID
, it\n incorrectly lists them as TOLL_FREE
or DID
.
Returns a list of third party applications in a specific security profile.
", + "smithy.api#http": { + "method": "GET", + "uri": "/security-profiles-applications/{InstanceId}/{SecurityProfileId}", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "Applications", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.connect#ListSecurityProfileApplicationsRequest": { + "type": "structure", + "members": { + "SecurityProfileId": { + "target": "com.amazonaws.connect#SecurityProfileId", + "traits": { + "smithy.api#documentation": "The security profile identifier.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "InstanceId": { + "target": "com.amazonaws.connect#InstanceId", + "traits": { + "smithy.api#documentation": "The instance identifier.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.connect#NextToken", + "traits": { + "smithy.api#documentation": "The token for the next set of results. The next set of results can be retrieved by using the \n token value returned in the previous response when making the next request.
", + "smithy.api#httpQuery": "nextToken" + } + }, + "MaxResults": { + "target": "com.amazonaws.connect#MaxResult1000", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "The maximum number of results to return per page.
", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connect#ListSecurityProfileApplicationsResponse": { + "type": "structure", + "members": { + "Applications": { + "target": "com.amazonaws.connect#Applications", + "traits": { + "smithy.api#documentation": "A list of the third party application's metadata.
" + } + }, + "NextToken": { + "target": "com.amazonaws.connect#NextToken", + "traits": { + "smithy.api#documentation": "The token for the next set of results. The next set of results can be retrieved by using the \n token value returned in the previous response when making the next request.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.connect#ListSecurityProfilePermissions": { "type": "operation", "input": { @@ -18956,6 +19120,15 @@ "smithy.api#pattern": "^(^[\\S].*[\\S]$)|(^[\\S]$)$" } }, + "com.amazonaws.connect#Namespace": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, "com.amazonaws.connect#NextToken": { "type": "string" }, @@ -19396,6 +19569,15 @@ } } }, + "com.amazonaws.connect#Permission": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, "com.amazonaws.connect#PermissionsList": { "type": "list", "member": { @@ -23037,19 +23219,19 @@ "EventBridgeAction": { "target": "com.amazonaws.connect#EventBridgeActionDefinition", "traits": { - "smithy.api#documentation": "Information about the EventBridge action.
" + "smithy.api#documentation": "Information about the EventBridge action.
\nSupported only for TriggerEventSource
values:\n OnPostCallAnalysisAvailable
| OnRealTimeCallAnalysisAvailable
|\n OnPostChatAnalysisAvailable
| OnContactEvaluationSubmit
|\n OnMetricDataUpdate
\n
Information about the contact category action.
" + "smithy.api#documentation": "Information about the contact category action.
\nSupported only for TriggerEventSource
values:\n OnPostCallAnalysisAvailable
| OnRealTimeCallAnalysisAvailable
|\n OnPostChatAnalysisAvailable
| OnZendeskTicketCreate
|\n OnZendeskTicketStatusUpdate
| OnSalesforceCaseCreate
\n
Information about the send notification action.
" + "smithy.api#documentation": "Information about the send notification action.
\nSupported only for TriggerEventSource
values:\n OnPostCallAnalysisAvailable
| OnRealTimeCallAnalysisAvailable
|\n OnPostChatAnalysisAvailable
| OnContactEvaluationSubmit
|\n OnMetricDataUpdate
\n
The name of the event source. This field is required if TriggerEventSource
is one of the\n following values: OnZendeskTicketCreate
| OnZendeskTicketStatusUpdate
|\n OnSalesforceCaseCreate
\n
The name of the event source. This field is required if TriggerEventSource
is one of the\n following values: OnZendeskTicketCreate
| OnZendeskTicketStatusUpdate
|\n OnSalesforceCaseCreate
| OnContactEvaluationSubmit
|\n OnMetricDataUpdate
.
The list of resources that a security profile applies tag restrictions to in Amazon Connect.
" } + }, + "Applications": { + "target": "com.amazonaws.connect#Applications", + "traits": { + "smithy.api#documentation": "This API is in preview release for Amazon Connect and is subject to change.
\nA list of the third party application's metadata.
" + } } }, "traits": { diff --git a/codegen/sdk/aws-models/database-migration-service.json b/codegen/sdk/aws-models/database-migration-service.json index a52cd5841bf..3c2f3ad3058 100644 --- a/codegen/sdk/aws-models/database-migration-service.json +++ b/codegen/sdk/aws-models/database-migration-service.json @@ -2247,13 +2247,13 @@ "MaxCapacityUnits": { "target": "com.amazonaws.databasemigrationservice#IntegerOptional", "traits": { - "smithy.api#documentation": "Specifies the maximum value of the DMS capacity units (DCUs) for which a given DMS Serverless\n replication can be provisioned. A single DCU is 2GB of RAM, with 2 DCUs as the minimum value allowed.\n The list of valid DCU values includes 2, 4, 8, 16, 32, 64, 128, 192, 256, and 384. So, the maximum value\n that you can specify for DMS Serverless is 384. The MaxCapacityUnits
parameter is the only\n DCU parameter you are required to specify.
Specifies the maximum value of the DMS capacity units (DCUs) for which a given DMS Serverless\n replication can be provisioned. A single DCU is 2GB of RAM, with 1 DCU as the minimum value allowed.\n The list of valid DCU values includes 1, 2, 4, 8, 16, 32, 64, 128, 192, 256, and 384. So, the maximum value\n that you can specify for DMS Serverless is 384. The MaxCapacityUnits
parameter is the only\n DCU parameter you are required to specify.
Specifies the minimum value of the DMS capacity units (DCUs) for which a given DMS\n Serverless replication can be provisioned. A single DCU is 2GB of RAM, with 2 DCUs as the minimum value\n allowed. The list of valid DCU values includes 2, 4, 8, 16, 32, 64, 128, 192, 256, and 384. So, the minimum DCU\n value that you can specify for DMS Serverless is 2. You don't have to specify a value for the\n MinCapacityUnits
parameter. If you don't set this value, DMS scans the current activity\n of available source tables to identify an optimum setting for this parameter. If there is no current\n source activity or DMS can't otherwise identify a more appropriate value, it sets this parameter to\n the minimum DCU value allowed, 2.
Specifies the minimum value of the DMS capacity units (DCUs) for which a given DMS\n Serverless replication can be provisioned. A single DCU is 2GB of RAM, with 1 DCU as the minimum value\n allowed. The list of valid DCU values includes 1, 2, 4, 8, 16, 32, 64, 128, 192, 256, and 384. So, the minimum DCU\n value that you can specify for DMS Serverless is 1. You don't have to specify a value for the\n MinCapacityUnits
parameter. If you don't set this value, DMS scans the current activity\n of available source tables to identify an optimum setting for this parameter. If there is no current\n source activity or DMS can't otherwise identify a more appropriate value, it sets this parameter to\n the minimum DCU value allowed, 1.
Creates a data provider using the provided settings. A data provider stores \n a data store type and location information about your database.
" + "smithy.api#documentation": "Creates a data provider using the provided settings. A data provider stores \n a data store type and location information about your database.
", + "smithy.api#examples": [ + { + "title": "Create Data Provider", + "documentation": "Creates the data provider with the specified parameters.", + "input": { + "DataProviderName": "sqlServer-dev", + "Engine": "sqlserver", + "Description": "description", + "Settings": { + "MicrosoftSqlServerSettings": { + "ServerName": "ServerName2", + "Port": 11112, + "DatabaseName": "DatabaseName", + "SslMode": "none" + } + }, + "Tags": [ + { + "Key": "access", + "Value": "authorizedusers" + } + ] + }, + "output": { + "DataProvider": { + "Engine": "postgres", + "Settings": { + "PostgreSqlSettings": { + "SslMode": "none", + "ServerName": "postrgesql.a1b2c3d4e5f6.us-east-1.rds.amazonaws.com", + "Port": 5432, + "DatabaseName": "target" + } + }, + "DataProviderCreationTime": "2023-05-12T10:50:41.988561Z", + "DataProviderName": "my-target-dataprovider", + "DataProviderArn": "arn:aws:dms:us-east-1:012345678901:data-provider:my-target-dataprovider" + } + } + } + ] } }, "com.amazonaws.databasemigrationservice#CreateDataProviderMessage": { @@ -2969,7 +3010,41 @@ } ], "traits": { - "smithy.api#documentation": "Creates the instance profile using the specified parameters.
" + "smithy.api#documentation": "Creates the instance profile using the specified parameters.
", + "smithy.api#examples": [ + { + "title": "Create Instance Profile", + "documentation": "Creates the instance profile using the specified parameters.", + "input": { + "SubnetGroupIdentifier": "my-subnet-group", + "PubliclyAccessible": true, + "KmsKeyArn": "arn:aws:kms:us-east-1:012345678901:key/01234567-89ab-cdef-0123-456789abcdef", + "InstanceProfileName": "my-instance-profile", + "Description": "Description", + "NetworkType": "DUAL", + "Tags": [ + { + "Key": "access", + "Value": "authorizedusers" + } + ] + }, + "output": { + "InstanceProfile": { + "SubnetGroupIdentifier": "public-subnets", + "VpcIdentifier": "vpc-0a1b2c3d4e5f6g7h8", + "VpcSecurityGroups": [ + "sg-0123456" + ], + "PubliclyAccessible": true, + "KmsKeyArn": "arn:aws:kms:us-east-1:012345678901:key/01234567-89ab-cdef-0123-456789abcdef", + "InstanceProfileCreationTime": "2022-12-16T09:44:43.543246Z", + "InstanceProfileName": "my-instance-profile", + "InstanceProfileArn": "arn:aws:dms:us-east-1:012345678901:instance-profile:my-instance-profile" + } + } + } + ] } }, "com.amazonaws.databasemigrationservice#CreateInstanceProfileMessage": { @@ -3077,7 +3152,72 @@ } ], "traits": { - "smithy.api#documentation": "Creates the migration project using the specified parameters.
\nYou can run this action only after you create an instance profile and data providers \n using CreateInstanceProfile and CreateDataProvider.
" + "smithy.api#documentation": "Creates the migration project using the specified parameters.
\nYou can run this action only after you create an instance profile and data providers \n using CreateInstanceProfile and CreateDataProvider.
", + "smithy.api#examples": [ + { + "title": "Create Migration Project", + "documentation": "Creates the migration project with the specified parameters.", + "input": { + "MigrationProjectName": "my-migration-project", + "SourceDataProviderDescriptors": [ + { + "DataProviderIdentifier": "arn:aws:dms:us-east-1:012345678901:data-provider:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345", + "SecretsManagerSecretId": "arn:aws:secretsmanager:us-east-1:012345678901:secret:myorg/example1/ALL.SOURCE.ORACLE_12-A1B2C3", + "SecretsManagerAccessRoleArn": "arn:aws:iam::012345678901:role/myuser-admin-access" + } + ], + "TargetDataProviderDescriptors": [ + { + "DataProviderIdentifier": "arn:aws:dms:us-east-1:012345678901:data-provider:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345", + "SecretsManagerSecretId": "arn:aws:secretsmanager:us-east-1:012345678901:secret:myorg/example1/TARGET.postgresql-A1B2C3", + "SecretsManagerAccessRoleArn": "arn:aws:iam::012345678901:role/myuser-admin-access" + } + ], + "InstanceProfileIdentifier": "ip-au-17", + "SchemaConversionApplicationAttributes": { + "S3BucketPath": "arn:aws:s3:::mylogin-bucket", + "S3BucketRoleArn": "arn:aws:iam::012345678901:role/Admin" + }, + "Tags": [ + { + "Key": "access", + "Value": "authorizedusers" + } + ], + "Description": "description", + "TransformationRules": "{\"key0\":\"value0\",\"key1\":\"value1\",\"key2\":\"value2\"}" + }, + "output": { + "MigrationProject": { + "SchemaConversionApplicationAttributes": { + "S3BucketPath": "my-s3-bucket/my_folder", + "S3BucketRoleArn": "arn:aws:iam::012345678901:role/my-s3role" + }, + "SourceDataProviderDescriptors": [ + { + "SecretsManagerAccessRoleArn": "arn:aws:iam::012345678901:role/my-access-role", + "SecretsManagerSecretId": "arn:aws:secretsmanager:us-east-1:012345678901:secret:myuser/ALL.SOURCE.ORACLE_12-0123456", + "DataProviderName": "source-oracle-12", + "DataProviderArn": "arn:aws:dms:us-east-1:012345678901:data-provider:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + } + ], + 
"MigrationProjectCreationTime": "2023-04-19T11:45:15.805253Z", + "InstanceProfileArn": "arn:aws:dms:us-east-1:012345678901:instance-profile:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "MigrationProjectName": "my-migration-project", + "MigrationProjectArn": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "InstanceProfileName": "my-instance-profile", + "TargetDataProviderDescriptors": [ + { + "SecretsManagerAccessRoleArn": "arn:aws:iam::012345678901:role/dmytbon-admin-access", + "SecretsManagerSecretId": "arn:aws:secretsmanager:us-east-1:012345678901:secret:myuser/TARGET.postgresql-0123456", + "DataProviderName": "target-dataprovider-3", + "DataProviderArn": "arn:aws:dms:us-east-1:012345678901:data-provider:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + } + ] + } + } + } + ] } }, "com.amazonaws.databasemigrationservice#CreateMigrationProjectMessage": { @@ -3322,7 +3462,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates the replication instance using the specified parameters.
\nDMS requires that your account have certain roles with appropriate permissions\n before you can create a replication instance. For information on the required roles, see\n Creating the IAM Roles to Use With the CLI and DMS API. For\n information on the required permissions, see \n IAM Permissions Needed to Use DMS.
", + "smithy.api#documentation": "Creates the replication instance using the specified parameters.
\nDMS requires that your account have certain roles with appropriate permissions\n before you can create a replication instance. For information on the required roles, see\n Creating the IAM Roles to Use With the CLI and DMS API. For\n information on the required permissions, see \n IAM Permissions Needed to Use DMS.
\nIf you don't specify a version when creating a replication instance, DMS will create the instance using the\n default engine version. For information about the default engine version, see\n Release Notes.
\nA value that indicates whether minor engine upgrades are applied automatically to the\n replication instance during the maintenance window. This parameter defaults to\n true
.
Default: true
\n
When AutoMinorVersionUpgrade
is enabled, DMS uses the current default\n engine version when you create a replication instance. For example, if you set\n EngineVersion
to a lower version number than the current default version,\n DMS uses the default version.
If AutoMinorVersionUpgrade
\n isn’t enabled when you create a replication instance, DMS uses the\n engine version specified by the EngineVersion
parameter.
A value that indicates whether minor engine upgrades are applied automatically to the\n replication instance during the maintenance window. This parameter defaults to\n true
.
Default: true
\n
Indicates when you want a change data capture (CDC) operation to stop. The value can be\n either server time or commit time.
\nServer time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”
\nCommit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12“
" + "smithy.api#documentation": "Indicates when you want a change data capture (CDC) operation to stop. The value can be\n either server time or commit time.
\nServer time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”
\nCommit time example: --cdc-stop-position “commit_time:2018-02-09T12:12:12“
" } }, "Tags": { @@ -3950,6 +4090,9 @@ "com.amazonaws.databasemigrationservice#DataProviderSettings": { "type": "union", "members": { + "RedshiftSettings": { + "target": "com.amazonaws.databasemigrationservice#RedshiftDataProviderSettings" + }, "PostgreSqlSettings": { "target": "com.amazonaws.databasemigrationservice#PostgreSqlDataProviderSettings" }, @@ -3961,6 +4104,21 @@ }, "MicrosoftSqlServerSettings": { "target": "com.amazonaws.databasemigrationservice#MicrosoftSqlServerDataProviderSettings" + }, + "DocDbSettings": { + "target": "com.amazonaws.databasemigrationservice#DocDbDataProviderSettings" + }, + "MariaDbSettings": { + "target": "com.amazonaws.databasemigrationservice#MariaDbDataProviderSettings", + "traits": { + "smithy.api#documentation": "Provides information that defines a MariaDB data provider.
" + } + }, + "MongoDbSettings": { + "target": "com.amazonaws.databasemigrationservice#MongoDbDataProviderSettings", + "traits": { + "smithy.api#documentation": "Provides information that defines a MongoDB data provider.
" + } } }, "traits": { @@ -4370,7 +4528,32 @@ } ], "traits": { - "smithy.api#documentation": "Deletes the specified data provider.
\nAll migration projects associated with the data provider must be deleted or modified \n before you can delete the data provider.
\nDeletes the specified data provider.
\nAll migration projects associated with the data provider must be deleted or modified \n before you can delete the data provider.
\nDeletes the specified instance profile.
\nAll migration projects associated with the instance profile must be deleted or modified \n before you can delete the instance profile.
\nDeletes the specified instance profile.
\nAll migration projects associated with the instance profile must be deleted or modified \n before you can delete the instance profile.
\nDeletes the specified migration project.
\nThe migration project must be closed before you can delete it.
\nDeletes the specified migration project.
\nThe migration project must be closed before you can delete it.
\nReturns configuration parameters for a schema conversion project.
" + "smithy.api#documentation": "Returns configuration parameters for a schema conversion project.
", + "smithy.api#examples": [ + { + "title": "Describe Conversion Configuration", + "documentation": "Returns configuration parameters for a schema conversion project.", + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345" + }, + "output": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "ConversionConfiguration": "{\"Common project settings\":{\"ShowSeverityLevelInSql\":\"CRITICAL\"},\"ORACLE_TO_POSTGRESQL\" : {\"ToTimeZone\":false,\"LastDayBuiltinFunctionOracle\":false, \"NextDayBuiltinFunctionOracle\":false,\"ConvertProceduresToFunction\":false,\"NvlBuiltinFunctionOracle\":false,\"DbmsAssertBuiltinFunctionOracle\":false}}" + } + } + ] } }, "com.amazonaws.databasemigrationservice#DescribeConversionConfigurationMessage": { @@ -5502,6 +5759,43 @@ ], "traits": { "smithy.api#documentation": "Returns a paginated list of data providers for your account in the current region.
", + "smithy.api#examples": [ + { + "title": "Describe Data Providers", + "documentation": "", + "input": { + "Filters": [ + { + "Name": "data-provider-identifier", + "Values": [ + "arn:aws:dms:us-east-1:012345678901:data-provider:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345" + ] + } + ], + "MaxRecords": 20, + "Marker": "EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345" + }, + "output": { + "Marker": "EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345", + "DataProviders": [ + { + "Engine": "postgres", + "DataProviderCreationTime": "2023-05-12T10:50:41.988561Z", + "DataProviderName": "my-target-data-provider", + "DataProviderArn": "arn:aws:dms:us-east-1:012345678901:data-provider:my-target-data-provider", + "Settings": { + "PostgreSqlSettings": { + "SslMode": "none", + "ServerName": "postrgesql.0a1b2c3d4e5f.us-east-1.rds.amazonaws.com", + "Port": 5432, + "DatabaseName": "target" + } + } + } + ] + } + } + ], "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "Marker", @@ -6126,6 +6420,35 @@ }, "traits": { "smithy.api#documentation": "Returns a paginated list of extension pack associations for the specified migration project.\n An extension pack is an add-on module \n that emulates functions present in a source database that are required when converting objects \n to the target database.
", + "smithy.api#examples": [ + { + "title": "Describe Extension Pack Associations", + "documentation": "Returns a paginated list of extension pack associations for the specified migration project.", + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "Filters": [ + { + "Name": "instance-profile-identifier", + "Values": [ + "arn:aws:dms:us-east-1:012345678901:instance-profile:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345" + ] + } + ], + "Marker": "0123456789abcdefghijklmnopqrs", + "MaxRecords": 20 + }, + "output": { + "Marker": "0123456789abcdefghijklmnopqrs", + "Requests": [ + { + "Status": "SUCCESS", + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef", + "MigrationProjectArn": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + } + ] + } + } + ], "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "Marker", @@ -6538,6 +6861,38 @@ ], "traits": { "smithy.api#documentation": "Returns a paginated list of instance profiles for your account in the current region.
", + "smithy.api#examples": [ + { + "title": "Describe Instance Profiles", + "documentation": "Returns a paginated list of instance profiles for your account in the current region.", + "input": { + "Filters": [ + { + "Name": "instance-profile-identifier", + "Values": [ + "arn:aws:dms:us-east-1:012345678901:instance-profile:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345" + ] + } + ], + "MaxRecords": 20, + "Marker": "0123456789abcdefghijklmnopqrs" + }, + "output": { + "Marker": "0123456789abcdefghijklmnopqrs", + "InstanceProfiles": [ + { + "SubnetGroupIdentifier": "public-subnets", + "VpcIdentifier": "vpc-0a1b2c3d4e5f6g7h8", + "PubliclyAccessible": true, + "KmsKeyArn": "arn:aws:kms:us-east-1:012345678901:key/01234567-89ab-cdef-0123-456789abcdef", + "InstanceProfileCreationTime": "2022-12-16T09:44:43.543246Z", + "InstanceProfileName": "my-instance-profile", + "InstanceProfileArn": "arn:aws:dms:us-east-1:012345678901:instance-profile:my-instance-profile" + } + ] + } + } + ], "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "Marker", @@ -6606,6 +6961,35 @@ ], "traits": { "smithy.api#documentation": "Returns a paginated list of metadata model assessments for your account in the current region.
", + "smithy.api#examples": [ + { + "title": "Describe Metadata Model Assessments", + "documentation": "Returns a paginated list of metadata model assessments for your account in the current region.", + "input": { + "MigrationProjectIdentifier": "", + "Filters": [ + { + "Name": "my-migration-project", + "Values": [ + "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + ] + } + ], + "Marker": "0123456789abcdefghijklmnopqrs", + "MaxRecords": 20 + }, + "output": { + "Marker": "ASDLKJASDJKHDFHGDNBGDASKJHGFK", + "Requests": [ + { + "Status": "SUCCESS", + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef", + "MigrationProjectArn": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + } + ] + } + } + ], "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "Marker", @@ -6681,6 +7065,35 @@ ], "traits": { "smithy.api#documentation": "Returns a paginated list of metadata model conversions for a migration project.
", + "smithy.api#examples": [ + { + "title": "Describe Metadata Model Conversions", + "documentation": "Returns a paginated list of metadata model conversions for a migration project.", + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345", + "Filters": [ + { + "Name": "request-id", + "Values": [ + "01234567-89ab-cdef-0123-456789abcdef" + ] + } + ], + "Marker": "EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "MaxRecords": 123 + }, + "output": { + "Marker": "0123456789abcdefghijklmnopqrs", + "Requests": [ + { + "Status": "SUCCESS", + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef", + "MigrationProjectArn": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + } + ] + } + } + ], "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "Marker", @@ -6756,6 +7169,35 @@ ], "traits": { "smithy.api#documentation": "Returns a paginated list of metadata model exports.
", + "smithy.api#examples": [ + { + "title": "Describe Metadata Model Exports As Script", + "documentation": "Returns a paginated list of metadata model exports.", + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "Filters": [ + { + "Name": "request-id", + "Values": [ + "01234567-89ab-cdef-0123-456789abcdef" + ] + } + ], + "Marker": "0123456789abcdefghijklmnopqrs", + "MaxRecords": 20 + }, + "output": { + "Marker": "0123456789abcdefghijklmnopqrs", + "Requests": [ + { + "Status": "SUCCESS", + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef", + "MigrationProjectArn": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + } + ] + } + } + ], "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "Marker", @@ -6831,6 +7273,35 @@ ], "traits": { "smithy.api#documentation": "Returns a paginated list of metadata model exports.
", + "smithy.api#examples": [ + { + "title": "Describe Metadata Model Exports To Target", + "documentation": "Returns a paginated list of metadata model exports.", + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "Filters": [ + { + "Name": "request-id", + "Values": [ + "01234567-89ab-cdef-0123-456789abcdef" + ] + } + ], + "Marker": "0123456789abcdefghijklmnopqrs", + "MaxRecords": 20 + }, + "output": { + "Marker": "0123456789abcdefghijklmnopqrs", + "Requests": [ + { + "Status": "SUCCESS", + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef", + "MigrationProjectArn": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + } + ] + } + } + ], "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "Marker", @@ -6906,6 +7377,35 @@ ], "traits": { "smithy.api#documentation": "Returns a paginated list of metadata model imports.
", + "smithy.api#examples": [ + { + "title": "Describe Metadata Model Imports", + "documentation": "Returns a paginated list of metadata model imports.", + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "Filters": [ + { + "Name": "request-id", + "Values": [ + "01234567-89ab-cdef-0123-456789abcdef" + ] + } + ], + "Marker": "0123456789abcdefghijklmnopqrs", + "MaxRecords": 20 + }, + "output": { + "Marker": "0123456789abcdefghijklmnopqrs", + "Requests": [ + { + "Status": "SUCCESS", + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef", + "MigrationProjectArn": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + } + ] + } + } + ], "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "Marker", @@ -6984,6 +7484,56 @@ ], "traits": { "smithy.api#documentation": "Returns a paginated list of migration projects for your account in the current region.
", + "smithy.api#examples": [ + { + "title": "Describe Migration Projects", + "documentation": "Returns a paginated list of migration projects for your account in the current region.", + "input": { + "Filters": [ + { + "Name": "migration-project-identifier", + "Values": [ + "arn:aws:dms:us-east-1:012345678901:migration-project:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ12345678901" + ] + } + ], + "MaxRecords": 20, + "Marker": "EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ123456" + }, + "output": { + "Marker": "0123456789abcdefghijklmnopqrs", + "MigrationProjects": [ + { + "SchemaConversionApplicationAttributes": { + "S3BucketPath": "my-s3-bucket/my_folder", + "S3BucketRoleArn": "arn:aws:iam::012345678901:role/my-s3role" + }, + "SourceDataProviderDescriptors": [ + { + "SecretsManagerAccessRoleArn": "arn:aws:iam::012345678901:role/my-access-role", + "SecretsManagerSecretId": "arn:aws:secretsmanager:us-east-1:012345678901:secret:mygroup/myalias/ALL.SOURCE.ORACLE_12-012345", + "DataProviderName": "all-source-oracle-12", + "DataProviderArn": "arn:aws:dms:us-east-1:012345678901:data-provider:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + } + ], + "MigrationProjectCreationTime": "2023-04-19T11:45:15.805253Z", + "InstanceProfileArn": "arn:aws:dms:us-east-1:012345678901:instance-profile:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "MigrationProjectName": "my-migration-project", + "MigrationProjectArn": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "InstanceProfileName": "my-instance-profile", + "TargetDataProviderDescriptors": [ + { + "SecretsManagerAccessRoleArn": "arn:aws:iam::012345678901:role/dmytbon-admin-access", + "SecretsManagerSecretId": "arn:aws:secretsmanager:us-east-1:012345678901:secret:mygroup/myalias/TARGET.postgresql-012345", + "DataProviderName": "my-data-provider", + "DataProviderArn": "arn:aws:dms:us-east-1:012345678901:data-provider:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + } + ] + } + ] + } + } + ], "smithy.api#paginated": { 
"inputToken": "Marker", "outputToken": "Marker", @@ -8858,6 +9408,44 @@ "smithy.api#documentation": "The settings in JSON format for the DMS Transfer type source endpoint.
" } }, + "com.amazonaws.databasemigrationservice#DocDbDataProviderSettings": { + "type": "structure", + "members": { + "ServerName": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "The name of the source DocumentDB server.
" + } + }, + "Port": { + "target": "com.amazonaws.databasemigrationservice#IntegerOptional", + "traits": { + "smithy.api#documentation": "The port value for the DocumentDB data provider.
" + } + }, + "DatabaseName": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "The database name on the DocumentDB data provider.
" + } + }, + "SslMode": { + "target": "com.amazonaws.databasemigrationservice#DmsSslModeValue", + "traits": { + "smithy.api#documentation": "The SSL mode used to connect to the DocumentDB data provider. \n The default value is none
.
The Amazon Resource Name (ARN) of the certificate used for SSL connection.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Provides information that defines a DocumentDB data provider.
" + } + }, "com.amazonaws.databasemigrationservice#DocDbSettings": { "type": "structure", "members": { @@ -9634,7 +10222,31 @@ } ], "traits": { - "smithy.api#documentation": "Saves a copy of a database migration assessment report to your Amazon S3 bucket. DMS can save \n your assessment report as a comma-separated value (CSV) or a PDF file.
" + "smithy.api#documentation": "Saves a copy of a database migration assessment report to your Amazon S3 bucket. DMS can save \n your assessment report as a comma-separated value (CSV) or a PDF file.
", + "smithy.api#examples": [ + { + "title": "Export Metadata Model Assessment", + "documentation": "Saves a copy of a database migration assessment report to your S3 bucket. DMS can save your assessment report as a comma-separated value (CSV) or a PDF file.", + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "SelectionRules": "{\"rules\": [{\"rule-type\": \"selection\",\"rule-id\": \"1\",\"rule-name\": \"1\",\"object-locator\": {\"server-name\": \"aurora-pg.cluster-a1b2c3d4e5f6.us-east-1.rds.amazonaws.com\", \"schema-name\": \"schema1\", \"table-name\": \"Cities\"},\"rule-action\": \"explicit\"} ]}", + "FileName": "file", + "AssessmentReportTypes": [ + "pdf" + ] + }, + "output": { + "PdfReport": { + "S3ObjectKey": "object-name", + "ObjectURL": "url" + }, + "CsvReport": { + "S3ObjectKey": "object-name", + "ObjectURL": "url" + } + } + } + ] } }, "com.amazonaws.databasemigrationservice#ExportMetadataModelAssessmentMessage": { @@ -10788,6 +11400,38 @@ } } }, + "com.amazonaws.databasemigrationservice#MariaDbDataProviderSettings": { + "type": "structure", + "members": { + "ServerName": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "The name of the MariaDB server.
" + } + }, + "Port": { + "target": "com.amazonaws.databasemigrationservice#IntegerOptional", + "traits": { + "smithy.api#documentation": "The port value for the MariaDB data provider
" + } + }, + "SslMode": { + "target": "com.amazonaws.databasemigrationservice#DmsSslModeValue", + "traits": { + "smithy.api#documentation": "The SSL mode used to connect to the MariaDB data provider. \n The default value is none
.
The Amazon Resource Name (ARN) of the certificate used for SSL connection.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Provides information that defines a MariaDB data provider.
" + } + }, "com.amazonaws.databasemigrationservice#MessageFormatValue": { "type": "enum", "members": { @@ -10895,7 +11539,7 @@ "TrimSpaceInChar": { "target": "com.amazonaws.databasemigrationservice#BooleanOptional", "traits": { - "smithy.api#documentation": "Use the TrimSpaceInChar
source endpoint setting to trim data \n on CHAR and NCHAR data types during migration. The default value is true
.
Use the TrimSpaceInChar
source endpoint setting to right-trim data \n on CHAR and NCHAR data types during migration. Setting TrimSpaceInChar
\n does not left-trim data. The default value is true
.
Modifies the specified schema conversion configuration using the provided parameters.
" + "smithy.api#documentation": "Modifies the specified schema conversion configuration using the provided parameters.
", + "smithy.api#examples": [ + { + "title": "Modify Conversion Configuration", + "documentation": "Modifies the specified schema conversion configuration using the provided parameters.", + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "ConversionConfiguration": "{\"Common project settings\":{\"ShowSeverityLevelInSql\":\"CRITICAL\"},\"ORACLE_TO_POSTGRESQL\" : {\"ToTimeZone\":false,\"LastDayBuiltinFunctionOracle\":false, \"NextDayBuiltinFunctionOracle\":false,\"ConvertProceduresToFunction\":false,\"NvlBuiltinFunctionOracle\":false,\"DbmsAssertBuiltinFunctionOracle\":false}}" + }, + "output": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + } + } + ] } }, "com.amazonaws.databasemigrationservice#ModifyConversionConfigurationMessage": { @@ -11129,7 +11786,43 @@ } ], "traits": { - "smithy.api#documentation": "Modifies the specified data provider using the provided settings.
\nYou must remove the data provider from all migration projects before you can modify it.
\nModifies the specified data provider using the provided settings.
\nYou must remove the data provider from all migration projects before you can modify it.
\nModifies the specified instance profile using the provided parameters.
\nAll migration projects associated with the instance profile must be deleted \n or modified before you can modify the instance profile.
\nModifies the specified instance profile using the provided parameters.
\nAll migration projects associated with the instance profile must be deleted \n or modified before you can modify the instance profile.
\nModifies the specified migration project using the provided parameters.
\nThe migration project must be closed before you can modify it.
\nModifies the specified migration project using the provided parameters.
\nThe migration project must be closed before you can modify it.
\nA value that indicates that minor version upgrades are applied automatically to the\n replication instance during the maintenance window. Changing this parameter doesn't result\n in an outage, except in the case described following. The change is asynchronously applied\n as soon as possible.
\nAn outage does result if these factors apply:
\nThis parameter is set to true
during the maintenance window.
A newer minor version is available.
\nDMS has enabled automatic patching for the given engine version.
\nWhen AutoMinorVersionUpgrade
is enabled, DMS uses the current default\n engine version when you modify a replication instance. For example, if you set\n EngineVersion
to a lower version number than the current default version,\n DMS uses the default version.
If AutoMinorVersionUpgrade
\n isn’t enabled when you modify a replication instance, DMS uses the\n engine version specified by the EngineVersion
parameter.
A value that indicates that minor version upgrades are applied automatically to the\n replication instance during the maintenance window. Changing this parameter doesn't result\n in an outage, except in the case described following. The change is asynchronously applied\n as soon as possible.
\nAn outage does result if these factors apply:
\nThis parameter is set to true
during the maintenance window.
A newer minor version is available.
\nDMS has enabled automatic patching for the given engine version.
\nIndicates when you want a change data capture (CDC) operation to stop. The value can be\n either server time or commit time.
\nServer time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”
\nCommit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12“
" + "smithy.api#documentation": "Indicates when you want a change data capture (CDC) operation to stop. The value can be\n either server time or commit time.
\nServer time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”
\nCommit time example: --cdc-stop-position “commit_time:2018-02-09T12:12:12“
" } }, "TaskData": { @@ -12283,6 +13066,62 @@ "smithy.api#output": {} } }, + "com.amazonaws.databasemigrationservice#MongoDbDataProviderSettings": { + "type": "structure", + "members": { + "ServerName": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "The name of the MongoDB server.
" + } + }, + "Port": { + "target": "com.amazonaws.databasemigrationservice#IntegerOptional", + "traits": { + "smithy.api#documentation": "The port value for the MongoDB data provider.
" + } + }, + "DatabaseName": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "The database name on the MongoDB data provider.
" + } + }, + "SslMode": { + "target": "com.amazonaws.databasemigrationservice#DmsSslModeValue", + "traits": { + "smithy.api#documentation": "The SSL mode used to connect to the MongoDB data provider. \n The default value is none
.
The Amazon Resource Name (ARN) of the certificate used for SSL connection.
" + } + }, + "AuthType": { + "target": "com.amazonaws.databasemigrationservice#AuthTypeValue", + "traits": { + "smithy.api#documentation": "The authentication type for the database connection. Valid values are PASSWORD or NO.
" + } + }, + "AuthSource": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": " The MongoDB database name. This setting isn't used when AuthType
is\n set to \"no\"
.
The default is \"admin\"
.
The authentication method for connecting to the data provider. Valid values are DEFAULT, MONGODB_CR, or SCRAM_SHA_1.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Provides information that defines a MongoDB data provider.
" + } + }, "com.amazonaws.databasemigrationservice#MongoDbSettings": { "type": "structure", "members": { @@ -12301,7 +13140,7 @@ "ServerName": { "target": "com.amazonaws.databasemigrationservice#String", "traits": { - "smithy.api#documentation": "The name of the server on the MongoDB source endpoint.
" + "smithy.api#documentation": "The name of the server on the MongoDB source endpoint. For MongoDB Atlas, provide the \n server name for any of the servers in the replication set.
" } }, "Port": { @@ -12949,7 +13788,7 @@ "SecretsManagerOracleAsmAccessRoleArn": { "target": "com.amazonaws.databasemigrationservice#String", "traits": { - "smithy.api#documentation": "Required only if your Oracle endpoint uses Automatic Storage Management (ASM). The full ARN\n of the IAM role that specifies DMS as the trusted entity and grants the required\n permissions to access the SecretsManagerOracleAsmSecret
. This\n SecretsManagerOracleAsmSecret
has the secret value that allows access to\n the Oracle ASM of the endpoint.
You can specify one of two sets of values for these permissions. You can specify the\n values for this setting and SecretsManagerOracleAsmSecretId
. Or you can\n specify clear-text values for AsmUserName
, AsmPassword
, and\n AsmServerName
. You can't specify both. For more information on\n creating this SecretsManagerOracleAsmSecret
and the\n SecretsManagerOracleAsmAccessRoleArn
and\n SecretsManagerOracleAsmSecretId
required to access it, see Using secrets to access Database Migration Service\n resources in the Database Migration Service User\n Guide.
Required only if your Oracle endpoint uses Automatic Storage Management (ASM). The full ARN\n of the IAM role that specifies DMS as the trusted entity and grants the required\n permissions to access the SecretsManagerOracleAsmSecret
. This\n SecretsManagerOracleAsmSecret
has the secret value that allows access to\n the Oracle ASM of the endpoint.
You can specify one of two sets of values for these permissions. You can specify the\n values for this setting and SecretsManagerOracleAsmSecretId
. Or you can\n specify clear-text values for AsmUser
, AsmPassword
, and\n AsmServerName
. You can't specify both. For more information on\n creating this SecretsManagerOracleAsmSecret
and the\n SecretsManagerOracleAsmAccessRoleArn
and\n SecretsManagerOracleAsmSecretId
required to access it, see Using secrets to access Database Migration Service\n resources in the Database Migration Service User\n Guide.
When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as \n varchar(5)
.
When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as \n varchar(5)
. You must set this setting on both the source and target endpoints for it to take effect.
Provides information that defines a Redis target endpoint.
" } }, + "com.amazonaws.databasemigrationservice#RedshiftDataProviderSettings": { + "type": "structure", + "members": { + "ServerName": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "The name of the Amazon Redshift server.
" + } + }, + "Port": { + "target": "com.amazonaws.databasemigrationservice#IntegerOptional", + "traits": { + "smithy.api#documentation": "The port value for the Amazon Redshift data provider.
" + } + }, + "DatabaseName": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "The database name on the Amazon Redshift data provider.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Provides information that defines an Amazon Redshift data provider.
" + } + }, "com.amazonaws.databasemigrationservice#RedshiftSettings": { "type": "structure", "members": { @@ -13958,7 +14823,7 @@ "MapBooleanAsBoolean": { "target": "com.amazonaws.databasemigrationservice#BooleanOptional", "traits": { - "smithy.api#documentation": "When true, lets Redshift migrate the boolean type as boolean. By default, Redshift migrates booleans as \n varchar(1)
.
When true, lets Redshift migrate the boolean type as boolean. By default, Redshift migrates booleans as \n varchar(1)
. You must set this setting on both the source and target endpoints for it to take effect.
Indicates when you want a change data capture (CDC) operation to stop. The value can be\n either server time or commit time.
\nServer time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”
\nCommit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12“
" + "smithy.api#documentation": "Indicates when you want a change data capture (CDC) operation to stop. The value can be\n either server time or commit time.
\nServer time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”
\nCommit time example: --cdc-stop-position “commit_time:2018-02-09T12:12:12“
" } }, "RecoveryCheckpoint": { @@ -15728,7 +16593,7 @@ "CsvNullValue": { "target": "com.amazonaws.databasemigrationservice#String", "traits": { - "smithy.api#documentation": "An optional parameter that specifies how DMS treats null\n values. While handling the null value, you can use this\n parameter to pass a user-defined string as null when writing to\n the target. For example, when target columns are not nullable,\n you can use this option to differentiate between the empty\n string value and the null value. So, if you set this parameter\n value to the empty string (\"\" or ''), DMS treats the empty\n string as the null value instead of NULL
.
The default value is NULL
. Valid values include any valid string.
An optional parameter that specifies how DMS treats null\n values. While handling the null value, you can use this\n parameter to pass a user-defined string as null when writing to\n the target. For example, when target columns are nullable,\n you can use this option to differentiate between the empty\n string value and the null value. So, if you set this parameter\n value to the empty string (\"\" or ''), DMS treats the empty\n string as the null value instead of NULL
.
The default value is NULL
. Valid values include any valid string.
Applies the extension pack to your target database. An extension pack is an add-on \n module that emulates functions present in a source database that are required when \n converting objects to the target database. \n
" + "smithy.api#documentation": "Applies the extension pack to your target database. An extension pack is an add-on \n module that emulates functions present in a source database that are required when \n converting objects to the target database. \n
", + "smithy.api#examples": [ + { + "title": "Start Extension Pack Association", + "documentation": "Applies the extension pack to your target database.", + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012" + }, + "output": { + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef" + } + } + ] } }, "com.amazonaws.databasemigrationservice#StartExtensionPackAssociationMessage": { @@ -16163,7 +17040,20 @@ } ], "traits": { - "smithy.api#documentation": "Creates a database migration assessment report by assessing the migration complexity for \n your source database. A database migration assessment report summarizes all of the schema \n conversion tasks. It also details the action items for database objects that can't be converted \n to the database engine of your target database instance.
" + "smithy.api#documentation": "Creates a database migration assessment report by assessing the migration complexity for \n your source database. A database migration assessment report summarizes all of the schema \n conversion tasks. It also details the action items for database objects that can't be converted \n to the database engine of your target database instance.
", + "smithy.api#examples": [ + { + "title": "Start Metadata Model Assessment", + "documentation": "Creates a database migration assessment report by assessing the migration complexity for \n your source database.", + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "SelectionRules": "{\"rules\": [{\"rule-type\": \"selection\",\"rule-id\": \"1\",\"rule-name\": \"1\",\"object-locator\": {\"server-name\": \"aurora-pg.cluster-0a1b2c3d4e5f.us-east-1.rds.amazonaws.com\", \"schema-name\": \"schema1\", \"table-name\": \"Cities\"},\"rule-action\": \"explicit\"} ]}" + }, + "output": { + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef" + } + } + ] } }, "com.amazonaws.databasemigrationservice#StartMetadataModelAssessmentMessage": { @@ -16237,7 +17127,20 @@ } ], "traits": { - "smithy.api#documentation": "Converts your source database objects to a format compatible with the target database.
" + "smithy.api#documentation": "Converts your source database objects to a format compatible with the target database.
", + "smithy.api#examples": [ + { + "title": "Start Metadata Model Conversion", + "documentation": "Converts your source database objects to a format compatible with the target database. ", + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "SelectionRules": "{\"rules\": [{\"rule-type\": \"selection\",\"rule-id\": \"1\",\"rule-name\": \"1\",\"object-locator\": {\"server-name\": \"aurora-pg.cluster-0a1b2c3d4e5f.us-east-1.rds.amazonaws.com\", \"schema-name\": \"schema1\", \"table-name\": \"Cities\"},\"rule-action\": \"explicit\"} ]}" + }, + "output": { + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef" + } + } + ] } }, "com.amazonaws.databasemigrationservice#StartMetadataModelConversionMessage": { @@ -16311,7 +17214,22 @@ } ], "traits": { - "smithy.api#documentation": "Saves your converted code to a file as a SQL script, and stores this file on your Amazon S3 bucket.
" + "smithy.api#documentation": "Saves your converted code to a file as a SQL script, and stores this file on your Amazon S3 bucket.
", + "smithy.api#examples": [ + { + "title": "Start Metadata Model Export As Script", + "documentation": "Saves your converted code to a file as a SQL script, and stores this file on your S3 bucket.", + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "SelectionRules": "{\"rules\": [{\"rule-type\": \"selection\",\"rule-id\": \"1\",\"rule-name\": \"1\",\"object-locator\": {\"server-name\": \"aurora-pg.cluster-0a1b2c3d4e5f.us-east-1.rds.amazonaws.com\", \"schema-name\": \"schema1\", \"table-name\": \"Cities\"},\"rule-action\": \"explicit\"} ]}", + "Origin": "SOURCE", + "FileName": "FILE" + }, + "output": { + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef" + } + } + ] } }, "com.amazonaws.databasemigrationservice#StartMetadataModelExportAsScriptMessage": { @@ -16398,7 +17316,21 @@ } ], "traits": { - "smithy.api#documentation": "Applies converted database objects to your target database.
" + "smithy.api#documentation": "Applies converted database objects to your target database.
", + "smithy.api#examples": [ + { + "title": "Start Metadata Model Export To Target", + "documentation": "Applies converted database objects to your target database.", + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:EXAMPLEABCDEFGHIJKLMNOPQRSTUVWXYZ012345", + "SelectionRules": "{\"rules\": [{\"rule-type\": \"selection\",\"rule-id\": \"1\",\"rule-name\": \"1\",\"object-locator\": {\"server-name\": \"aurora-pg.cluster-a1b2c3d4e5f6.us-east-1.rds.amazonaws.com\", \"schema-name\": \"schema1\", \"table-name\": \"Cities\"},\"rule-action\": \"explicit\"} ]}", + "OverwriteExtensionPack": true + }, + "output": { + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef" + } + } + ] } }, "com.amazonaws.databasemigrationservice#StartMetadataModelExportToTargetMessage": { @@ -16478,7 +17410,22 @@ } ], "traits": { - "smithy.api#documentation": "Loads the metadata for all the dependent database objects of the parent object.
\nThis operation uses your project's Amazon S3 bucket as a metadata cache to improve performance.
" + "smithy.api#documentation": "Loads the metadata for all the dependent database objects of the parent object.
\nThis operation uses your project's Amazon S3 bucket as a metadata cache to improve performance.
", + "smithy.api#examples": [ + { + "title": "Start Metadata Model Import", + "documentation": "Loads the metadata for all the dependent database objects of the parent object.", + "input": { + "MigrationProjectIdentifier": "arn:aws:dms:us-east-1:012345678901:migration-project:0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ012", + "SelectionRules": "{\"rules\": [{\"rule-type\": \"selection\",\"rule-id\": \"1\",\"rule-name\": \"1\",\"object-locator\": {\"server-name\": \"aurora-pg.cluster-0a1b2c3d4e5f.us-east-1.rds.amazonaws.com\", \"schema-name\": \"schema1\", \"table-name\": \"Cities\"},\"rule-action\": \"explicit\"} ]}", + "Origin": "SOURCE", + "Refresh": false + }, + "output": { + "RequestIdentifier": "01234567-89ab-cdef-0123-456789abcdef" + } + } + ] } }, "com.amazonaws.databasemigrationservice#StartMetadataModelImportMessage": { @@ -16946,7 +17893,7 @@ "CdcStopPosition": { "target": "com.amazonaws.databasemigrationservice#String", "traits": { - "smithy.api#documentation": "Indicates when you want a change data capture (CDC) operation to stop. The value can be\n either server time or commit time.
\nServer time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”
\nCommit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12“
" + "smithy.api#documentation": "Indicates when you want a change data capture (CDC) operation to stop. The value can be\n either server time or commit time.
\nServer time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”
\nCommit time example: --cdc-stop-position “commit_time:2018-02-09T12:12:12“
" } } }, @@ -17713,7 +18660,7 @@ } ], "traits": { - "smithy.api#documentation": "Migrates 10 active and enabled Amazon SNS subscriptions at a time and converts them to corresponding Amazon EventBridge rules.\n By default, this operation migrates subscriptions only when all your replication instance versions are 3.4.6 or higher.\n If any replication instances are from versions earlier than 3.4.6, the operation raises an error and tells you\n to upgrade these instances to version 3.4.6 or higher. To enable migration regardless of version, set the Force
\n option to true. However, if you don't upgrade instances earlier than version 3.4.6, some types of events might not be\n available when you use Amazon EventBridge.
To call this operation, make sure that you have certain permissions added to your user account.\n For more information, see Migrating event subscriptions to Amazon EventBridge\n in the Amazon Web Services Database Migration Service User Guide.
" + "smithy.api#documentation": "Migrates 10 active and enabled Amazon SNS subscriptions at a time and converts them to corresponding Amazon EventBridge rules.\n By default, this operation migrates subscriptions only when all your replication instance versions are 3.4.5 or higher.\n If any replication instances are from versions earlier than 3.4.5, the operation raises an error and tells you\n to upgrade these instances to version 3.4.5 or higher. To enable migration regardless of version, set the Force
\n option to true. However, if you don't upgrade instances earlier than version 3.4.5, some types of events might not be\n available when you use Amazon EventBridge.
To call this operation, make sure that you have certain permissions added to your user account.\n For more information, see Migrating event subscriptions to Amazon EventBridge\n in the Amazon Web Services Database Migration Service User Guide.
" } }, "com.amazonaws.databasemigrationservice#UpdateSubscriptionsToEventBridgeMessage": { @@ -17722,7 +18669,7 @@ "ForceMove": { "target": "com.amazonaws.databasemigrationservice#BooleanOptional", "traits": { - "smithy.api#documentation": "When set to true, this operation migrates DMS subscriptions for Amazon SNS\n notifications no matter what your replication instance version is. If not set or set to\n false, this operation runs only when all your replication instances are from DMS\n version 3.4.6 or higher.\n
" + "smithy.api#documentation": "When set to true, this operation migrates DMS subscriptions for Amazon SNS\n notifications no matter what your replication instance version is. If not set or set to\n false, this operation runs only when all your replication instances are from DMS\n version 3.4.5 or higher.\n
" } } }, diff --git a/codegen/sdk/aws-models/datasync.json b/codegen/sdk/aws-models/datasync.json index 2cde9b70a89..de31952bc32 100644 --- a/codegen/sdk/aws-models/datasync.json +++ b/codegen/sdk/aws-models/datasync.json @@ -5175,7 +5175,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates recommendations about where to migrate your data to in Amazon Web Services.\n Recommendations are generated based on information that DataSync Discovery collects about your\n on-premises storage system's resources. For more information, see Recommendations\n provided by DataSync Discovery.
\nOnce generated, you can view your recommendations by using the DescribeStorageSystemResources operation.
\nIf your discovery job completes successfully, you don't need to use this operation. DataSync\n Discovery generates the recommendations for you automatically.
\nCreates recommendations about where to migrate your data to in Amazon Web Services.\n Recommendations are generated based on information that DataSync Discovery collects about your\n on-premises storage system's resources. For more information, see Recommendations\n provided by DataSync Discovery.
\nOnce generated, you can view your recommendations by using the DescribeStorageSystemResources operation.
", "smithy.api#endpoint": { "hostPrefix": "discovery-" } @@ -7446,7 +7446,7 @@ "Verified": { "target": "com.amazonaws.datasync#ReportOverride", "traits": { - "smithy.api#documentation": "Specifies the level of reporting for the files, objects, and directories that DataSync attempted to verify at the end of your transfer. This only applies if you configure your task to verify data during and after the transfer (which DataSync does by default).
" + "smithy.api#documentation": "Specifies the level of reporting for the files, objects, and directories that DataSync attempted to verify at the end of your transfer.
" } }, "Deleted": { diff --git a/codegen/sdk/aws-models/dynamodb.json b/codegen/sdk/aws-models/dynamodb.json index c4e51dc02fb..513b55bd698 100644 --- a/codegen/sdk/aws-models/dynamodb.json +++ b/codegen/sdk/aws-models/dynamodb.json @@ -5246,6 +5246,18 @@ "traits": { "smithy.api#documentation": "The number of items exported.
" } + }, + "ExportType": { + "target": "com.amazonaws.dynamodb#ExportType", + "traits": { + "smithy.api#documentation": "Choice of whether to execute as a full export or incremental export. Valid values are FULL_EXPORT
or INCREMENTAL_EXPORT
. If INCREMENTAL_EXPORT
is provided, the IncrementalExportSpecification
must also be used.
Optional object containing the parameters specific to an incremental export.
" + } } }, "traits": { @@ -5272,6 +5284,9 @@ } } }, + "com.amazonaws.dynamodb#ExportFromTime": { + "type": "timestamp" + }, "com.amazonaws.dynamodb#ExportManifest": { "type": "string" }, @@ -5336,6 +5351,12 @@ "traits": { "smithy.api#documentation": "Export can be in one of the following states: IN_PROGRESS, COMPLETED, or\n FAILED.
" } + }, + "ExportType": { + "target": "com.amazonaws.dynamodb#ExportType", + "traits": { + "smithy.api#documentation": "Choice of whether to execute as a full export or incremental export. Valid values are FULL_EXPORT
or INCREMENTAL_EXPORT
. If INCREMENTAL_EXPORT
is provided, the IncrementalExportSpecification
must also be used.
The format for the exported data. Valid values for ExportFormat
are\n DYNAMODB_JSON
or ION
.
Choice of whether to execute as a full export or incremental export. Valid values are FULL_EXPORT
or INCREMENTAL_EXPORT
. If INCREMENTAL_EXPORT
is provided, the IncrementalExportSpecification
must also be used.
Optional object containing the parameters specific to an incremental export.
" + } } }, "traits": { @@ -5456,6 +5489,43 @@ "com.amazonaws.dynamodb#ExportTime": { "type": "timestamp" }, + "com.amazonaws.dynamodb#ExportToTime": { + "type": "timestamp" + }, + "com.amazonaws.dynamodb#ExportType": { + "type": "enum", + "members": { + "FULL_EXPORT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FULL_EXPORT" + } + }, + "INCREMENTAL_EXPORT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INCREMENTAL_EXPORT" + } + } + } + }, + "com.amazonaws.dynamodb#ExportViewType": { + "type": "enum", + "members": { + "NEW_IMAGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NEW_IMAGE" + } + }, + "NEW_AND_OLD_IMAGES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NEW_AND_OLD_IMAGES" + } + } + } + }, "com.amazonaws.dynamodb#ExpressionAttributeNameMap": { "type": "map", "key": { @@ -6426,6 +6496,32 @@ } } }, + "com.amazonaws.dynamodb#IncrementalExportSpecification": { + "type": "structure", + "members": { + "ExportFromTime": { + "target": "com.amazonaws.dynamodb#ExportFromTime", + "traits": { + "smithy.api#documentation": "Time in the past which provides the inclusive start range for the export table's data, counted in seconds from the start of the Unix epoch. The incremental export will reflect the table's state including and after this point in time.
" + } + }, + "ExportToTime": { + "target": "com.amazonaws.dynamodb#ExportToTime", + "traits": { + "smithy.api#documentation": "Time in the past which provides the exclusive end range for the export table's data, counted in seconds from the start of the Unix epoch. The incremental export will reflect the table's state just prior to this point in time. If this is not provided, the latest time with data available will be used.
" + } + }, + "ExportViewType": { + "target": "com.amazonaws.dynamodb#ExportViewType", + "traits": { + "smithy.api#documentation": "Choice of whether to output the previous item image prior to the start time of the incremental export. Valid values are NEW_AND_OLD_IMAGES
and NEW_IMAGES
.
Optional object containing the parameters specific to an incremental export.
" + } + }, "com.amazonaws.dynamodb#IndexName": { "type": "string", "traits": { @@ -6961,7 +7057,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "List backups associated with an Amazon Web Services account. To list backups for a\n given table, specify TableName
. ListBackups
returns a\n paginated list of results with at most 1 MB worth of items in a page. You can also\n specify a maximum number of entries to be returned in a page.
In the request, start time is inclusive, but end time is exclusive. Note that these\n boundaries are for the time at which the original backup was requested.
\nYou can call ListBackups
a maximum of five times per second.
List DynamoDB backups that are associated with an Amazon Web Services account and weren't made with Amazon Web Services Backup. \n To list these backups for a given table, specify TableName
. ListBackups
returns a\n paginated list of results with at most 1 MB worth of items in a page. You can also\n specify a maximum number of entries to be returned in a page.
In the request, start time is inclusive, but end time is exclusive. Note that these\n boundaries are for the time at which the original backup was requested.
\nYou can call ListBackups
a maximum of five times per second.
If you want to retrieve the complete list of backups made with Amazon Web Services Backup, use the \n Amazon Web Services Backup list API.\n
" } }, "com.amazonaws.dynamodb#ListBackupsInput": { diff --git a/codegen/sdk/aws-models/ec2.json b/codegen/sdk/aws-models/ec2.json index bcd11511cd0..1094b39c881 100644 --- a/codegen/sdk/aws-models/ec2.json +++ b/codegen/sdk/aws-models/ec2.json @@ -14233,7 +14233,7 @@ "type": "structure", "members": { "AvailabilityZone": { - "target": "com.amazonaws.ec2#String", + "target": "com.amazonaws.ec2#AvailabilityZoneName", "traits": { "smithy.api#clientOptional": {}, "smithy.api#documentation": "The Availability Zone in which to create the default subnet.
", @@ -20187,6 +20187,14 @@ "smithy.api#default": false, "smithy.api#documentation": "Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation
. \n Otherwise, it is UnauthorizedOperation
.
\n\t\t Choose to enable or disable support for Federal Information Processing Standards (FIPS) on the instance.\n\t
" + } } }, "traits": { @@ -64454,6 +64462,132 @@ "traits": { "smithy.api#enumValue": "r7gd.16xlarge" } + }, + "r7a_medium": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.medium" + } + }, + "r7a_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.large" + } + }, + "r7a_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.xlarge" + } + }, + "r7a_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.2xlarge" + } + }, + "r7a_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.4xlarge" + } + }, + "r7a_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.8xlarge" + } + }, + "r7a_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.12xlarge" + } + }, + "r7a_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.16xlarge" + } + }, + "r7a_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.24xlarge" + } + }, + "r7a_32xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.32xlarge" + } + }, + "r7a_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r7a.48xlarge" + } + }, + "c7i_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i.large" + } + }, + "c7i_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i.xlarge" + } + }, + "c7i_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i.2xlarge" + } + }, + "c7i_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i.4xlarge" + } + }, + "c7i_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i.8xlarge" + } + }, + "c7i_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + 
"smithy.api#enumValue": "c7i.12xlarge" + } + }, + "c7i_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i.16xlarge" + } + }, + "c7i_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i.24xlarge" + } + }, + "c7i_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i.48xlarge" + } + }, + "mac2_m2pro_metal": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "mac2-m2pro.metal" + } } } }, @@ -88836,7 +88970,7 @@ "target": "com.amazonaws.ec2#RevokeSecurityGroupIngressResult" }, "traits": { - "smithy.api#documentation": "Removes the specified inbound (ingress) rules from a security group.
\nYou can specify rules using either rule IDs or security group rule properties. If you use\n rule properties, the values that you specify (for example, ports) must match the existing rule's \n values exactly. Each rule has a protocol, from and to ports, and source (CIDR range, \n security group, or prefix list). For the TCP and UDP protocols, you must also specify the \n destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type \n and code. If the security group rule has a description, you do not need to specify the description \n to revoke the rule.
\nFor a default VPC, if the values you specify do not match the existing rule's values, no error is\n returned, and the output describes the security group rules that were not revoked.
\nAmazon Web Services recommends that you describe the security group to verify that the rules were removed.
\nRule changes are propagated to instances within the security group as quickly as possible. \n However, a small delay might occur.
" + "smithy.api#documentation": "Removes the specified inbound (ingress) rules from a security group.
\nYou can specify rules using either rule IDs or security group rule properties. If you use\n rule properties, the values that you specify (for example, ports) must match the existing rule's \n values exactly. Each rule has a protocol, from and to ports, and source (CIDR range, \n security group, or prefix list). For the TCP and UDP protocols, you must also specify the \n destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type \n and code. If the security group rule has a description, you do not need to specify the description \n to revoke the rule.
\nFor a default VPC, if the values you specify do not match the existing rule's values,\n no error is returned, and the output describes the security group rules that were not\n revoked.
\nFor a non-default VPC, if the values you specify do not match the existing rule's\n values, an InvalidPermission.NotFound
client error is returned, and no\n rules are revoked.
Amazon Web Services recommends that you describe the security group to verify that the rules were removed.
\nRule changes are propagated to instances within the security group as quickly as possible. \n However, a small delay might occur.
" } }, "com.amazonaws.ec2#RevokeSecurityGroupIngressRequest": { @@ -101881,6 +102015,16 @@ "smithy.api#documentation": "The tags.
", "smithy.api#xmlName": "tagSet" } + }, + "FipsEnabled": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "FipsEnabled", + "smithy.api#clientOptional": {}, + "smithy.api#default": false, + "smithy.api#documentation": "\n\t\t Describes if support for Federal Information Processing Standards (FIPS) is enabled on the instance.\n\t
", + "smithy.api#xmlName": "fipsEnabled" + } } }, "traits": { diff --git a/codegen/sdk/aws-models/efs.json b/codegen/sdk/aws-models/efs.json index 4b84dddbc18..c0afd3e01ec 100644 --- a/codegen/sdk/aws-models/efs.json +++ b/codegen/sdk/aws-models/efs.json @@ -424,7 +424,7 @@ "PerformanceMode": { "target": "com.amazonaws.efs#PerformanceMode", "traits": { - "smithy.api#documentation": "The performance mode of the file system. We recommend generalPurpose
\n performance mode for most file systems. File systems using the maxIO
performance\n mode can scale to higher levels of aggregate throughput and operations per second with a\n tradeoff of slightly higher latencies for most file operations. The performance mode\n can't be changed after the file system has been created.
The maxIO
mode is not supported on file systems using One Zone storage classes.
The performance mode of the file system. We recommend generalPurpose
\n performance mode for most file systems. File systems using the maxIO
performance\n mode can scale to higher levels of aggregate throughput and operations per second with a\n tradeoff of slightly higher latencies for most file operations. The performance mode\n can't be changed after the file system has been created.
The maxIO
mode is not supported on file systems using One Zone storage classes.
Default is generalPurpose
.
The throughput, measured in\n MiB/s,\n that you want to provision for a file system that you're creating. Valid values are\n 1-1024. Required if ThroughputMode
is set to provisioned
. The upper\n limit for throughput is 1024 MiB/s. To increase this limit, contact Amazon Web Services Support. For\n more information, see Amazon EFS quotas that you can increase in the Amazon EFS User Guide.
The throughput, measured in mebibytes per second (MiBps), that you want to provision for a\n file system that you're creating. Required if ThroughputMode
is set to\n provisioned
. Valid values are 1-3414 MiBps, with the upper limit depending on\n Region. To increase this limit, contact Amazon Web Services Support. For more information, see Amazon EFS quotas\n that you can increase in the Amazon EFS User\n Guide.
Creates a replication configuration that replicates an existing EFS file system to a new,\n read-only file system. For more information, see Amazon EFS replication in the\n Amazon EFS User Guide. The replication configuration\n specifies the following:
\n\n Source file system - An existing EFS file system that you\n want replicated. The source file system cannot be a destination file system in an existing\n replication configuration.
\n\n Destination file system configuration - The configuration of\n the destination file system to which the source file system will be replicated. There can\n only be one destination file system in a replication configuration. The destination file\n system configuration consists of the following properties:
\n\n Amazon Web Services Region - The Amazon Web Services Region in\n which the destination file system is created. Amazon EFS replication is\n available in all Amazon Web Services Regions that Amazon EFS is available in,\n except Africa (Cape Town), Asia Pacific (Hong Kong), Asia Pacific (Jakarta), Europe\n (Milan), and Middle East (Bahrain).
\n\n Availability Zone - If you want the destination file system\n to use EFS One Zone availability and durability, you must specify the Availability\n Zone to create the file system in. For more information about EFS storage classes, see\n \n Amazon EFS storage classes in the Amazon EFS User\n Guide.
\n\n Encryption - All destination file systems are created with\n encryption at rest enabled. You can specify the Key Management Service (KMS) key that is used to encrypt the destination file system. If you don't\n specify a KMS key, your service-managed KMS key for\n Amazon EFS is used.
\nAfter the file system is created, you cannot change the KMS key.
\nThe following properties are set by default:
\n\n Performance mode - The destination file system's performance\n mode matches that of the source file system, unless the destination file system uses EFS\n One Zone storage. In that case, the General Purpose performance mode is used. The\n performance mode cannot be changed.
\n\n Throughput mode - The destination file system's throughput\n mode matches that of the source file system. After the file system is created, you can modify the\n throughput mode.
\nThe following properties are turned off by default:
\n\n Lifecycle management - EFS lifecycle management and EFS\n Intelligent-Tiering are not enabled on the destination file system. After the destination\n file system is created, you can enable EFS lifecycle management and EFS\n Intelligent-Tiering.
\n\n Automatic backups - Automatic daily backups not enabled on\n the destination file system. After the file system is created, you can change this\n setting.
\nFor more information, see Amazon EFS replication in the\n Amazon EFS User Guide.
", + "smithy.api#documentation": "Creates a replication configuration that replicates an existing EFS file system to a new,\n read-only file system. For more information, see Amazon EFS replication in the\n Amazon EFS User Guide. The replication configuration\n specifies the following:
\n\n Source file system - An existing EFS file system that you\n want replicated. The source file system cannot be a destination file system in an existing\n replication configuration.
\n\n Destination file system configuration - The configuration of\n the destination file system to which the source file system will be replicated. There can\n only be one destination file system in a replication configuration. The destination file\n system configuration consists of the following properties:
\n\n Amazon Web Services Region - The Amazon Web Services Region in which the destination file system is created. Amazon EFS replication is available in all Amazon Web Services Regions in which EFS is\n available. To use EFS replication in a Region that is disabled by default, you must\n first opt in to the Region. For more information, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference\n Reference Guide\n
\n\n Availability Zone - If you want the destination file system\n to use EFS One Zone availability and durability, you must specify the Availability\n Zone to create the file system in. For more information about EFS storage classes, see\n \n Amazon EFS storage classes in the Amazon EFS User\n Guide.
\n\n Encryption - All destination file systems are created with\n encryption at rest enabled. You can specify the Key Management Service (KMS) key that is used to encrypt the destination file system. If you don't\n specify a KMS key, your service-managed KMS key for\n Amazon EFS is used.
\nAfter the file system is created, you cannot change the KMS key.
\nThe following properties are set by default:
\n\n Performance mode - The destination file system's performance\n mode matches that of the source file system, unless the destination file system uses EFS\n One Zone storage. In that case, the General Purpose performance mode is used. The\n performance mode cannot be changed.
\n\n Throughput mode - The destination file system's throughput\n mode matches that of the source file system. After the file system is created, you can modify the\n throughput mode.
\nThe following properties are turned off by default:
\n\n Lifecycle management - EFS lifecycle management and EFS\n Intelligent-Tiering are not enabled on the destination file system. After the destination\n file system is created, you can enable EFS lifecycle management and EFS\n Intelligent-Tiering.
\n\n Automatic backups - Automatic daily backups are enabled on\n the destination file system. After the file system is created, you can change this\n setting.
\nFor more information, see Amazon EFS replication in the\n Amazon EFS User Guide.
", "smithy.api#http": { "method": "POST", "uri": "/2015-02-01/file-systems/{SourceFileSystemId}/replication-configuration", @@ -995,7 +995,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes an existing replication configuration. To delete a replication configuration, you\n must make the request from the Amazon Web Services Region in which the destination file system\n is located. Deleting a replication configuration ends the replication process. After a\n replication configuration is deleted, the destination file system is no longer read-only. You\n can write to the destination file system after its status becomes\n Writeable
.
Deletes an existing replication configuration. Deleting a replication configuration ends the replication process. After a\n replication configuration is deleted, the destination file system is no longer read-only. You\n can write to the destination file system after its status becomes\n Writeable
.
Returns the account preferences settings for the Amazon Web Services account associated with the user making the request, in the current Amazon Web Services Region.\n For more information, see Managing Amazon EFS resource IDs.
", + "smithy.api#documentation": "Returns the account preferences settings for the Amazon Web Services account associated with the user making the request, in the current Amazon Web Services Region.
", "smithy.api#http": { "method": "GET", "uri": "/2015-02-01/account-preferences", @@ -2123,7 +2123,7 @@ "ProvisionedThroughputInMibps": { "target": "com.amazonaws.efs#ProvisionedThroughputInMibps", "traits": { - "smithy.api#documentation": "The amount of provisioned throughput, measured in MiB/s, for the file system. Valid for\n file systems using ThroughputMode
set to provisioned
.
The amount of provisioned throughput, measured in MiBps, for the file system. Valid for\n file systems using ThroughputMode
set to provisioned
.
Describes the replication configuration for a specific file system.
" } }, "com.amazonaws.efs#ReplicationConfigurationDescriptions": { @@ -5333,7 +5336,7 @@ "ProvisionedThroughputInMibps": { "target": "com.amazonaws.efs#ProvisionedThroughputInMibps", "traits": { - "smithy.api#documentation": "(Optional) Sets the amount of provisioned throughput, in MiB/s, for the file\n system. Valid values are 1-1024. If you are changing the throughput mode to provisioned, you must also \n provide the amount of provisioned throughput. Required if ThroughputMode
is changed \n to provisioned
on update.
(Optional) The throughput, measured in mebibytes per second (MiBps), that you want to\n provision for a file system that you're creating. Required if ThroughputMode
\n is set to provisioned
. Valid values are 1-3414 MiBps, with the upper limit\n depending on Region. To increase this limit, contact Amazon Web Services Support. For more information,\n see Amazon EFS\n quotas that you can increase in the Amazon EFS User\n Guide.
The specification applied to each worker type.
" } + }, + "runtimeConfiguration": { + "target": "com.amazonaws.emrserverless#ConfigurationList", + "traits": { + "smithy.api#documentation": "The Configuration \n specifications of an application. Each configuration consists of a classification and properties. You use this \n parameter when creating or updating an application. To see the runtimeConfiguration object of an application,\n run the GetApplication API operation.
" + } + }, + "monitoringConfiguration": { + "target": "com.amazonaws.emrserverless#MonitoringConfiguration" } }, "traits": { @@ -1433,6 +1442,18 @@ "traits": { "smithy.api#documentation": "The key-value pairs that specify worker type to\n WorkerTypeSpecificationInput
. This parameter must contain all valid worker\n types for a Spark or Hive application. Valid worker types include Driver
and\n Executor
for Spark applications and HiveDriver
and\n TezTask
for Hive applications. You can either set image details in this\n parameter for each worker type, or in imageConfiguration
for all worker\n types.
The Configuration \n specifications to use when creating an application. Each configuration consists of a classification and properties.\n This configuration is applied to all the job runs submitted under the application.
" + } + }, + "monitoringConfiguration": { + "target": "com.amazonaws.emrserverless#MonitoringConfiguration", + "traits": { + "smithy.api#documentation": "The configuration setting for monitoring.
" + } } } }, @@ -3410,6 +3431,18 @@ "traits": { "smithy.api#documentation": "The Amazon EMR release label for the application. You can change the release\n label to use a different release of Amazon EMR.
" } + }, + "runtimeConfiguration": { + "target": "com.amazonaws.emrserverless#ConfigurationList", + "traits": { + "smithy.api#documentation": "The Configuration \n specifications to use when updating an application. Each configuration consists of a classification and properties.\n This configuration is applied across all the job runs submitted under the application.
" + } + }, + "monitoringConfiguration": { + "target": "com.amazonaws.emrserverless#MonitoringConfiguration", + "traits": { + "smithy.api#documentation": "The configuration setting for monitoring.
" + } } } }, diff --git a/codegen/sdk/aws-models/finspace-data.json b/codegen/sdk/aws-models/finspace-data.json index 7a05fb6f548..7c27b7cda55 100644 --- a/codegen/sdk/aws-models/finspace-data.json +++ b/codegen/sdk/aws-models/finspace-data.json @@ -956,7 +956,7 @@ } ], "traits": { - "smithy.api#documentation": "Adds a user account to a permission group to grant permissions for actions a user can perform in FinSpace.
", + "smithy.api#documentation": "Adds a user to a permission group to grant permissions for actions a user can perform in FinSpace.
", "smithy.api#http": { "method": "POST", "uri": "/permission-group/{permissionGroupId}/users/{userId}", @@ -1041,7 +1041,8 @@ } }, "traits": { - "smithy.api#documentation": "The credentials required to access the external Dataview from the S3 location.
" + "smithy.api#documentation": "The credentials required to access the external Dataview from the S3 location.
", + "smithy.api#sensitive": {} } }, "com.amazonaws.finspacedata#Boolean": { @@ -1873,7 +1874,7 @@ "smithy.api#documentation": "The last name of the user that you want to register.
" } }, - "ApiAccess": { + "apiAccess": { "target": "com.amazonaws.finspacedata#ApiAccess", "traits": { "smithy.api#documentation": "The option to indicate whether the user can use the GetProgrammaticAccessCredentials
API to obtain credentials that can then be used to access other FinSpace Data API operations.
\n ENABLED
– The user has permissions to use the APIs.
\n DISABLED
– The user does not have permissions to use any APIs.
Short term API credentials.
" + "smithy.api#documentation": "Short term API credentials.
", + "smithy.api#sensitive": {} } }, "com.amazonaws.finspacedata#DataViewArn": { @@ -2551,7 +2553,7 @@ "userId": { "target": "com.amazonaws.finspacedata#UserId", "traits": { - "smithy.api#documentation": "The unique identifier for the user account that you want to disable.
", + "smithy.api#documentation": "The unique identifier for the user that you want to deactivate.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2574,7 +2576,7 @@ "userId": { "target": "com.amazonaws.finspacedata#UserId", "traits": { - "smithy.api#documentation": "The unique identifier for the disabled user account.
" + "smithy.api#documentation": "The unique identifier for the deactivated user.
" } } }, @@ -2611,7 +2613,7 @@ } ], "traits": { - "smithy.api#documentation": "Removes a user account from a permission group.
", + "smithy.api#documentation": "Removes a user from a permission group.
", "smithy.api#http": { "method": "DELETE", "uri": "/permission-group/{permissionGroupId}/users/{userId}", @@ -2724,7 +2726,7 @@ "userId": { "target": "com.amazonaws.finspacedata#UserId", "traits": { - "smithy.api#documentation": "The unique identifier for the user account that you want to enable.
", + "smithy.api#documentation": "The unique identifier for the user that you want to activate.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2747,7 +2749,7 @@ "userId": { "target": "com.amazonaws.finspacedata#UserId", "traits": { - "smithy.api#documentation": "The unique identifier for the enabled user account.
" + "smithy.api#documentation": "The unique identifier for the active user.
" } } }, @@ -3450,7 +3452,7 @@ } ], "traits": { - "smithy.api#documentation": "Request programmatic credentials to use with FinSpace SDK.
", + "smithy.api#documentation": "Request programmatic credentials to use with FinSpace SDK. For more information, see Step 2. Access credentials programmatically using IAM access key id and secret access key.
", "smithy.api#http": { "method": "GET", "uri": "/credentials/programmatic", @@ -3561,13 +3563,13 @@ "userId": { "target": "com.amazonaws.finspacedata#UserId", "traits": { - "smithy.api#documentation": "The unique identifier for the user account that is retrieved.
" + "smithy.api#documentation": "The unique identifier for the user that is retrieved.
" } }, "status": { "target": "com.amazonaws.finspacedata#UserStatus", "traits": { - "smithy.api#documentation": "The current status of the user account.
\n\n CREATING
– The user account creation is in progress.
\n ENABLED
– The user account is created and is currently active.
\n DISABLED
– The user account is currently inactive.
The current status of the user.
\n\n CREATING
– The creation is in progress.
\n ENABLED
– The user is created and is currently active.
\n DISABLED
– The user is currently inactive.
The timestamp at which the user account was created in FinSpace. The value is determined as epoch time in milliseconds.
" + "smithy.api#documentation": "The timestamp at which the user was created in FinSpace. The value is determined as epoch time in milliseconds.
" } }, "lastEnabledTime": { "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "Describes the last time the user account was enabled. The value is determined as epoch time in milliseconds.
" + "smithy.api#documentation": "Describes the last time the user was activated. The value is determined as epoch time in milliseconds.
" } }, "lastDisabledTime": { "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "Describes the last time the user account was disabled. The value is determined as epoch time in milliseconds.
" + "smithy.api#documentation": "Describes the last time the user was deactivated. The value is determined as epoch time in milliseconds.
" } }, "lastModifiedTime": { "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "Describes the last time the user account was updated. The value is determined as epoch time in milliseconds.
" + "smithy.api#documentation": "Describes the last time the user details were updated. The value is determined as epoch time in milliseconds.
" } }, "lastLoginTime": { @@ -4137,7 +4139,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists all the permission groups that are associated with a specific user account.
", + "smithy.api#documentation": "Lists all the permission groups that are associated with a specific user.
", "smithy.api#http": { "method": "GET", "uri": "/user/{userId}/permission-groups", @@ -4264,7 +4266,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists all available user accounts in FinSpace.
", + "smithy.api#documentation": "Lists all available users in FinSpace.
", "smithy.api#http": { "method": "GET", "uri": "/user", @@ -4394,7 +4396,7 @@ "users": { "target": "com.amazonaws.finspacedata#UserList", "traits": { - "smithy.api#documentation": "A list of all the user accounts.
" + "smithy.api#documentation": "A list of all the users.
" } }, "nextToken": { @@ -4486,7 +4488,7 @@ "membershipStatus": { "target": "com.amazonaws.finspacedata#PermissionGroupMembershipStatus", "traits": { - "smithy.api#documentation": "Indicates the status of the user account within a permission group.
\n\n ADDITION_IN_PROGRESS
– The user account is currently being added to the permission group.
\n ADDITION_SUCCESS
– The user account is successfully added to the permission group.
\n REMOVAL_IN_PROGRESS
– The user is currently being removed from the permission group.
Indicates the status of the user within a permission group.
\n\n ADDITION_IN_PROGRESS
– The user is currently being added to the permission group.
\n ADDITION_SUCCESS
– The user is successfully added to the permission group.
\n REMOVAL_IN_PROGRESS
– The user is currently being removed from the permission group.
Indicates the status of the user account within a permission group.
\n\n ADDITION_IN_PROGRESS
– The user account is currently being added to the permission group.
\n ADDITION_SUCCESS
– The user account is successfully added to the permission group.
\n REMOVAL_IN_PROGRESS
– The user is currently being removed from the permission group.
Indicates the status of the user within a permission group.
\n\n ADDITION_IN_PROGRESS
– The user is currently being added to the permission group.
\n ADDITION_SUCCESS
– The user is successfully added to the permission group.
\n REMOVAL_IN_PROGRESS
– The user is currently being removed from the permission group.
The structure of a permission group associated with a user account.
" + "smithy.api#documentation": "The structure of a permission group associated with a user.
" } }, "com.amazonaws.finspacedata#PermissionGroupByUserList": { @@ -4690,7 +4692,7 @@ "temporaryPassword": { "target": "com.amazonaws.finspacedata#Password", "traits": { - "smithy.api#documentation": "A randomly generated temporary password for the requested user account. This password expires in 7 days.
" + "smithy.api#documentation": "A randomly generated temporary password for the requested user. This password expires in 7 days.
" } } }, @@ -4858,8 +4860,8 @@ "traits": { "smithy.api#default": 0, "smithy.api#range": { - "min": 60, - "max": 720 + "min": 1, + "max": 60 } } }, @@ -5301,7 +5303,7 @@ } ], "traits": { - "smithy.api#documentation": "Modifies the details of the specified user account. You cannot update the userId
for a user.
Modifies the details of the specified user. You cannot update the userId
for a user.
The unique identifier for the user account to update.
", + "smithy.api#documentation": "The unique identifier for the user that you want to update.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -5368,7 +5370,7 @@ "userId": { "target": "com.amazonaws.finspacedata#UserId", "traits": { - "smithy.api#documentation": "The unique identifier of the updated user account.
" + "smithy.api#documentation": "The unique identifier of the updated user.
" } } }, @@ -5388,7 +5390,7 @@ "status": { "target": "com.amazonaws.finspacedata#UserStatus", "traits": { - "smithy.api#documentation": "The current status of the user account.
\n\n CREATING
– The user account creation is in progress.
\n ENABLED
– The user account is created and is currently active.
\n DISABLED
– The user account is currently inactive.
The current status of the user.
\n\n CREATING
– The user creation is in progress.
\n ENABLED
– The user is created and is currently active.
\n DISABLED
– The user is currently inactive.
The timestamp at which the user account was created in FinSpace. The value is determined as epoch time in milliseconds.
" + "smithy.api#documentation": "The timestamp at which the user was created in FinSpace. The value is determined as epoch time in milliseconds.
" } }, "lastEnabledTime": { "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "Describes the last time the user account was enabled. The value is determined as epoch time in milliseconds.\n
" + "smithy.api#documentation": "Describes the last time the user was activated. The value is determined as epoch time in milliseconds.\n
" } }, "lastDisabledTime": { "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "Describes the last time the user account was disabled. The value is determined as epoch time in milliseconds.
" + "smithy.api#documentation": "Describes the last time the user was deactivated. The value is determined as epoch time in milliseconds.
" } }, "lastModifiedTime": { "target": "com.amazonaws.finspacedata#TimestampEpoch", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "Describes the last time the user account was updated. The value is determined as epoch time in milliseconds.\n
" + "smithy.api#documentation": "Describes the last time the user was updated. The value is determined as epoch time in milliseconds.\n
" } }, "lastLoginTime": { @@ -5464,7 +5466,7 @@ } }, "traits": { - "smithy.api#documentation": "The details of the user account.
" + "smithy.api#documentation": "The details of the user.
" } }, "com.amazonaws.finspacedata#UserByPermissionGroup": { @@ -5479,7 +5481,7 @@ "status": { "target": "com.amazonaws.finspacedata#UserStatus", "traits": { - "smithy.api#documentation": "The current status of the user account.
\n\n CREATING
– The user account creation is in progress.
\n ENABLED
– The user account is created and is currently active.
\n DISABLED
– The user account is currently inactive.
The current status of the user.
\n\n CREATING
– The user creation is in progress.
\n ENABLED
– The user is created and is currently active.
\n DISABLED
– The user is currently inactive.
Indicates the status of the user account within a permission group.
\n\n ADDITION_IN_PROGRESS
– The user account is currently being added to the permission group.
\n ADDITION_SUCCESS
– The user account is successfully added to the permission group.
\n REMOVAL_IN_PROGRESS
– The user is currently being removed from the permission group.
Indicates the status of the user within a permission group.
\n\n ADDITION_IN_PROGRESS
– The user is currently being added to the permission group.
\n ADDITION_SUCCESS
– The user is successfully added to the permission group.
\n REMOVAL_IN_PROGRESS
– The user is currently being removed from the permission group.
The structure of a user account associated with a permission group.
" + "smithy.api#documentation": "The structure of a user associated with a permission group.
" } }, "com.amazonaws.finspacedata#UserByPermissionGroupList": { diff --git a/codegen/sdk/aws-models/firehose.json b/codegen/sdk/aws-models/firehose.json index 53befa1a513..02d7c3456e0 100644 --- a/codegen/sdk/aws-models/firehose.json +++ b/codegen/sdk/aws-models/firehose.json @@ -690,6 +690,28 @@ "smithy.api#pattern": ".*" } }, + "com.amazonaws.firehose#AuthenticationConfiguration": { + "type": "structure", + "members": { + "RoleARN": { + "target": "com.amazonaws.firehose#RoleARN", + "traits": { + "smithy.api#documentation": "The ARN of the role used to access the Amazon MSK cluster.
", + "smithy.api#required": {} + } + }, + "Connectivity": { + "target": "com.amazonaws.firehose#Connectivity", + "traits": { + "smithy.api#documentation": "The type of connectivity used to access the Amazon MSK cluster.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The authentication configuration of the Amazon MSK cluster.
" + } + }, "com.amazonaws.firehose#BlockSizeBytes": { "type": "integer", "traits": { @@ -764,7 +786,7 @@ "min": 1, "max": 512 }, - "smithy.api#pattern": "^jdbc:(redshift|postgresql)://((?!-)[A-Za-z0-9-]{1,63}(?The destination in the Serverless offering for Amazon OpenSearch Service. You can\n specify only one destination." } + }, + "MSKSourceConfiguration": { + "target": "com.amazonaws.firehose#MSKSourceConfiguration" } }, "traits": { @@ -1488,6 +1530,12 @@ "traits": { "smithy.api#enumValue": "KinesisStreamAsSource" } + }, + "MSKAsSource": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MSKAsSource" + } } } }, @@ -4293,6 +4341,77 @@ "smithy.api#pattern": "^[^:*]*$" } }, + "com.amazonaws.firehose#MSKClusterARN": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + }, + "smithy.api#pattern": "^arn:" + } + }, + "com.amazonaws.firehose#MSKSourceConfiguration": { + "type": "structure", + "members": { + "MSKClusterARN": { + "target": "com.amazonaws.firehose#MSKClusterARN", + "traits": { + "smithy.api#documentation": "The ARN of the Amazon MSK cluster.
", + "smithy.api#required": {} + } + }, + "TopicName": { + "target": "com.amazonaws.firehose#TopicName", + "traits": { + "smithy.api#documentation": "The topic name within the Amazon MSK cluster.
", + "smithy.api#required": {} + } + }, + "AuthenticationConfiguration": { + "target": "com.amazonaws.firehose#AuthenticationConfiguration", + "traits": { + "smithy.api#documentation": "The authentication configuration of the Amazon MSK cluster.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The configuration for the Amazon MSK cluster to be used as the source for a delivery\n stream.
" + } + }, + "com.amazonaws.firehose#MSKSourceDescription": { + "type": "structure", + "members": { + "MSKClusterARN": { + "target": "com.amazonaws.firehose#MSKClusterARN", + "traits": { + "smithy.api#documentation": "The ARN of the Amazon MSK cluster.
" + } + }, + "TopicName": { + "target": "com.amazonaws.firehose#TopicName", + "traits": { + "smithy.api#documentation": "The topic name within the Amazon MSK cluster.
" + } + }, + "AuthenticationConfiguration": { + "target": "com.amazonaws.firehose#AuthenticationConfiguration", + "traits": { + "smithy.api#documentation": "The authentication configuration of the Amazon MSK cluster.
" + } + }, + "DeliveryStartTimestamp": { + "target": "com.amazonaws.firehose#DeliveryStartTimestamp", + "traits": { + "smithy.api#documentation": "Kinesis Data Firehose starts retrieving records from the topic within the Amazon MSK\n cluster starting with this timestamp.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Details about the Amazon MSK cluster used as the source for a Kinesis Data Firehose\n delivery stream.
" + } + }, "com.amazonaws.firehose#NoEncryptionConfig": { "type": "enum", "members": { @@ -4740,6 +4859,12 @@ "traits": { "smithy.api#enumValue": "Delimiter" } + }, + "COMPRESSION_FORMAT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CompressionFormat" + } } } }, @@ -4762,6 +4887,12 @@ "smithy.api#enumValue": "RecordDeAggregation" } }, + "Decompression": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Decompression" + } + }, "Lambda": { "target": "smithy.api#Unit", "traits": { @@ -4814,7 +4945,7 @@ } ], "traits": { - "smithy.api#documentation": "Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To\n write multiple data records into a delivery stream, use PutRecordBatch.\n Applications using these operations are referred to as producers.
\nBy default, each delivery stream can take in up to 2,000 transactions per second,\n 5,000 records per second, or 5 MB per second. If you use PutRecord and\n PutRecordBatch, the limits are an aggregate across these two\n operations for each delivery stream. For more information about limits and how to request\n an increase, see Amazon\n Kinesis Data Firehose Limits.
\nYou must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000\n KiB in size, and any kind of data. For example, it can be a segment from a log file,\n geographic location data, website clickstream data, and so on.
\nKinesis Data Firehose buffers records before delivering them to the destination. To\n disambiguate the data blobs at the destination, a common solution is to use delimiters in\n the data, such as a newline (\\n
) or some other character unique within the\n data. This allows the consumer application to parse individual data items when reading the\n data from the destination.
The PutRecord
operation returns a RecordId
, which is a\n unique string assigned to each record. Producer applications can use this ID for purposes\n such as auditability and investigation.
If the PutRecord
operation throws a\n ServiceUnavailableException
, back off and retry. If the exception persists,\n it is possible that the throughput limits have been exceeded for the delivery stream.
Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they\n are added to a delivery stream as it tries to send the records to the destination. If the\n destination is unreachable for more than 24 hours, the data is no longer\n available.
\nDon't concatenate two or more base64 strings to form the data fields of your records.\n Instead, concatenate the raw data, then perform base64 encoding.
\nWrites a single data record into an Amazon Kinesis Data Firehose delivery stream. To\n write multiple data records into a delivery stream, use PutRecordBatch.\n Applications using these operations are referred to as producers.
\nBy default, each delivery stream can take in up to 2,000 transactions per second,\n 5,000 records per second, or 5 MB per second. If you use PutRecord and\n PutRecordBatch, the limits are an aggregate across these two\n operations for each delivery stream. For more information about limits and how to request\n an increase, see Amazon\n Kinesis Data Firehose Limits.
\nKinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.
\nYou must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000\n KiB in size, and any kind of data. For example, it can be a segment from a log file,\n geographic location data, website clickstream data, and so on.
\nKinesis Data Firehose buffers records before delivering them to the destination. To\n disambiguate the data blobs at the destination, a common solution is to use delimiters in\n the data, such as a newline (\\n
) or some other character unique within the\n data. This allows the consumer application to parse individual data items when reading the\n data from the destination.
The PutRecord
operation returns a RecordId
, which is a\n unique string assigned to each record. Producer applications can use this ID for purposes\n such as auditability and investigation.
If the PutRecord
operation throws a\n ServiceUnavailableException
, the API is automatically reinvoked (retried) 3\n times. If the exception persists, it is possible that the throughput limits have been\n exceeded for the delivery stream.
Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can\n result in data duplicates. For larger data assets, allow for a longer time out before\n retrying Put API operations.
\nData records sent to Kinesis Data Firehose are stored for 24 hours from the time they\n are added to a delivery stream as it tries to send the records to the destination. If the\n destination is unreachable for more than 24 hours, the data is no longer\n available.
\nDon't concatenate two or more base64 strings to form the data fields of your records.\n Instead, concatenate the raw data, then perform base64 encoding.
\nWrites multiple data records into a delivery stream in a single call, which can\n achieve higher throughput per producer than when writing single records. To write single\n data records into a delivery stream, use PutRecord. Applications using\n these operations are referred to as producers.
\nFor information about service quota, see Amazon Kinesis Data Firehose\n Quota.
\nEach PutRecordBatch request supports up to 500 records. Each record\n in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB\n for the entire request. These limits cannot be changed.
\nYou must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000\n KB in size, and any kind of data. For example, it could be a segment from a log file,\n geographic location data, website clickstream data, and so on.
\nKinesis Data Firehose buffers records before delivering them to the destination. To\n disambiguate the data blobs at the destination, a common solution is to use delimiters in\n the data, such as a newline (\\n
) or some other character unique within the\n data. This allows the consumer application to parse individual data items when reading the\n data from the destination.
The PutRecordBatch response includes a count of failed records,\n FailedPutCount
, and an array of responses, RequestResponses
.\n Even if the PutRecordBatch call succeeds, the value of\n FailedPutCount
may be greater than 0, indicating that there are records for\n which the operation didn't succeed. Each entry in the RequestResponses
array\n provides additional information about the processed record. It directly correlates with a\n record in the request array using the same ordering, from the top to the bottom. The\n response array always includes the same number of records as the request array.\n RequestResponses
includes both successfully and unsuccessfully processed\n records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing\n of subsequent records.
A successfully processed record includes a RecordId
value, which is\n unique for the record. An unsuccessfully processed record includes ErrorCode
\n and ErrorMessage
values. ErrorCode
reflects the type of error,\n and is one of the following values: ServiceUnavailableException
or\n InternalFailure
. ErrorMessage
provides more detailed\n information about the error.
If there is an internal server error or a timeout, the write might have completed or\n it might have failed. If FailedPutCount
is greater than 0, retry the request,\n resending only those records that might have failed processing. This minimizes the possible\n duplicate records and also reduces the total bytes sent (and corresponding charges). We\n recommend that you handle any duplicates at the destination.
If PutRecordBatch throws ServiceUnavailableException
,\n back off and retry. If the exception persists, it is possible that the throughput limits\n have been exceeded for the delivery stream.
Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they\n are added to a delivery stream as it attempts to send the records to the destination. If\n the destination is unreachable for more than 24 hours, the data is no longer\n available.
\nDon't concatenate two or more base64 strings to form the data fields of your records.\n Instead, concatenate the raw data, then perform base64 encoding.
\nWrites multiple data records into a delivery stream in a single call, which can\n achieve higher throughput per producer than when writing single records. To write single\n data records into a delivery stream, use PutRecord. Applications using\n these operations are referred to as producers.
\nKinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.
\nFor information about service quota, see Amazon Kinesis Data Firehose\n Quota.
\nEach PutRecordBatch request supports up to 500 records. Each record\n in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB\n for the entire request. These limits cannot be changed.
\nYou must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000\n KB in size, and any kind of data. For example, it could be a segment from a log file,\n geographic location data, website clickstream data, and so on.
\nKinesis Data Firehose buffers records before delivering them to the destination. To\n disambiguate the data blobs at the destination, a common solution is to use delimiters in\n the data, such as a newline (\\n
) or some other character unique within the\n data. This allows the consumer application to parse individual data items when reading the\n data from the destination.
The PutRecordBatch response includes a count of failed records,\n FailedPutCount
, and an array of responses, RequestResponses
.\n Even if the PutRecordBatch call succeeds, the value of\n FailedPutCount
may be greater than 0, indicating that there are records for\n which the operation didn't succeed. Each entry in the RequestResponses
array\n provides additional information about the processed record. It directly correlates with a\n record in the request array using the same ordering, from the top to the bottom. The\n response array always includes the same number of records as the request array.\n RequestResponses
includes both successfully and unsuccessfully processed\n records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing\n of subsequent records.
A successfully processed record includes a RecordId
value, which is\n unique for the record. An unsuccessfully processed record includes ErrorCode
\n and ErrorMessage
values. ErrorCode
reflects the type of error,\n and is one of the following values: ServiceUnavailableException
or\n InternalFailure
. ErrorMessage
provides more detailed\n information about the error.
If there is an internal server error or a timeout, the write might have completed or\n it might have failed. If FailedPutCount
is greater than 0, retry the request,\n resending only those records that might have failed processing. This minimizes the possible\n duplicate records and also reduces the total bytes sent (and corresponding charges). We\n recommend that you handle any duplicates at the destination.
If PutRecordBatch throws ServiceUnavailableException
,\n the API is automatically reinvoked (retried) 3 times. If the exception persists, it is\n possible that the throughput limits have been exceeded for the delivery stream.
Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can\n result in data duplicates. For larger data assets, allow for a longer time out before\n retrying Put API operations.
\nData records sent to Kinesis Data Firehose are stored for 24 hours from the time they\n are added to a delivery stream as it attempts to send the records to the destination. If\n the destination is unreachable for more than 24 hours, the data is no longer\n available.
\nDon't concatenate two or more base64 strings to form the data fields of your records.\n Instead, concatenate the raw data, then perform base64 encoding.
\nThe KinesisStreamSourceDescription value for the source Kinesis\n data stream.
" } + }, + "MSKSourceDescription": { + "target": "com.amazonaws.firehose#MSKSourceDescription", + "traits": { + "smithy.api#documentation": "The configuration description for the Amazon MSK cluster to be used as the source for a delivery\n stream.
" + } } }, "traits": { @@ -5902,7 +6039,7 @@ } ], "traits": { - "smithy.api#documentation": "Enables server-side encryption (SSE) for the delivery stream.
\nThis operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data\n Firehose first sets the encryption status of the stream to ENABLING
, and then\n to ENABLED
. The encryption status of a delivery stream is the\n Status
property in DeliveryStreamEncryptionConfiguration.\n If the operation fails, the encryption status changes to ENABLING_FAILED
. You\n can continue to read and write data to your delivery stream while the encryption status is\n ENABLING
, but the data is not encrypted. It can take up to 5 seconds after\n the encryption status changes to ENABLED
before all records written to the\n delivery stream are encrypted. To find out whether a record or a batch of records was\n encrypted, check the response elements PutRecordOutput$Encrypted and\n PutRecordBatchOutput$Encrypted, respectively.
To check the encryption status of a delivery stream, use DescribeDeliveryStream.
\nEven if encryption is currently enabled for a delivery stream, you can still invoke this\n operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this\n method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK
,\n Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new\n CMK is of type CUSTOMER_MANAGED_CMK
, Kinesis Data Firehose creates a grant\n that enables it to use the new CMK to encrypt and decrypt data and to manage the\n grant.
If a delivery stream already has encryption enabled and then you invoke this operation\n to change the ARN of the CMK or both its type and ARN and you get\n ENABLING_FAILED
, this only means that the attempt to change the CMK failed.\n In this case, encryption remains enabled with the old CMK.
If the encryption status of your delivery stream is ENABLING_FAILED
, you\n can invoke this operation again with a valid CMK. The CMK must be enabled and the key\n policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS\n encrypt and decrypt operations.
You can enable SSE for a delivery stream only if it's a delivery stream that uses\n DirectPut
as its source.
The StartDeliveryStreamEncryption
and\n StopDeliveryStreamEncryption
operations have a combined limit of 25 calls\n per delivery stream per 24 hours. For example, you reach the limit if you call\n StartDeliveryStreamEncryption
13 times and\n StopDeliveryStreamEncryption
12 times for the same delivery stream in a\n 24-hour period.
Enables server-side encryption (SSE) for the delivery stream.
\nThis operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data\n Firehose first sets the encryption status of the stream to ENABLING
, and then\n to ENABLED
. The encryption status of a delivery stream is the\n Status
property in DeliveryStreamEncryptionConfiguration.\n If the operation fails, the encryption status changes to ENABLING_FAILED
. You\n can continue to read and write data to your delivery stream while the encryption status is\n ENABLING
, but the data is not encrypted. It can take up to 5 seconds after\n the encryption status changes to ENABLED
before all records written to the\n delivery stream are encrypted. To find out whether a record or a batch of records was\n encrypted, check the response elements PutRecordOutput$Encrypted and\n PutRecordBatchOutput$Encrypted, respectively.
To check the encryption status of a delivery stream, use DescribeDeliveryStream.
\nEven if encryption is currently enabled for a delivery stream, you can still invoke this\n operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this\n method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK
,\n Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new\n CMK is of type CUSTOMER_MANAGED_CMK
, Kinesis Data Firehose creates a grant\n that enables it to use the new CMK to encrypt and decrypt data and to manage the\n grant.
For the KMS grant creation to be successful, Kinesis Data Firehose APIs StartDeliveryStreamEncryption
and CreateDeliveryStream
should not be called with session credentials that are more than 6 hours old.
If a delivery stream already has encryption enabled and then you invoke this operation\n to change the ARN of the CMK or both its type and ARN and you get\n ENABLING_FAILED
, this only means that the attempt to change the CMK failed.\n In this case, encryption remains enabled with the old CMK.
If the encryption status of your delivery stream is ENABLING_FAILED
, you\n can invoke this operation again with a valid CMK. The CMK must be enabled and the key\n policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS\n encrypt and decrypt operations.
You can enable SSE for a delivery stream only if it's a delivery stream that uses\n DirectPut
as its source.
The StartDeliveryStreamEncryption
and\n StopDeliveryStreamEncryption
operations have a combined limit of 25 calls\n per delivery stream per 24 hours. For example, you reach the limit if you call\n StartDeliveryStreamEncryption
13 times and\n StopDeliveryStreamEncryption
12 times for the same delivery stream in a\n 24-hour period.
An enum value representing possible filter fields.
", + "smithy.api#documentation": "An enum value representing possible filter fields.
\nReplace the enum value CLUSTER_NAME
with EKS_CLUSTER_NAME
.\n CLUSTER_NAME
has been deprecated.
Represents the field name used to sort the coverage details.
", + "smithy.api#documentation": "Represents the field name used to sort the coverage details.
\nReplace the enum value CLUSTER_NAME
with EKS_CLUSTER_NAME
.\n CLUSTER_NAME
has been deprecated.
Removes the existing GuardDuty delegated\n administrator of the organization. Only the organization's management account can run this \n API operation.
", + "smithy.api#documentation": "Removes the existing GuardDuty delegated\n administrator of the organization. Only the organization's management account can run this \n API operation.
", "smithy.api#http": { "method": "POST", "uri": "/admin/disable", @@ -4179,7 +4191,7 @@ "AdminAccountId": { "target": "com.amazonaws.guardduty#String", "traits": { - "smithy.api#documentation": "The Amazon Web Services Account ID for the organization account to be enabled as a GuardDuty delegated\n administrator.
", + "smithy.api#documentation": "The Amazon Web Services account ID for the organization account to be enabled as a GuardDuty delegated\n administrator.
", "smithy.api#jsonName": "adminAccountId", "smithy.api#required": {} } @@ -4349,7 +4361,7 @@ "CriterionKey": { "target": "com.amazonaws.guardduty#CriterionKey", "traits": { - "smithy.api#documentation": "An enum value representing possible scan properties to match with given scan\n entries.
", + "smithy.api#documentation": "An enum value representing possible scan properties to match with given scan\n entries.
\nReplace the enum value CLUSTER_NAME
with EKS_CLUSTER_NAME
.\n CLUSTER_NAME
has been deprecated.
Contains information about the finding, which is generated when abnormal or suspicious\n activity is detected.
" + "smithy.api#documentation": "Contains information about the finding that is generated when abnormal or suspicious\n activity is detected.
" } }, "com.amazonaws.guardduty#FindingCriteria": { @@ -7515,7 +7527,7 @@ } ], "traits": { - "smithy.api#documentation": "Invites Amazon Web Services accounts to become members of an organization administered by the Amazon Web Services account \n that invokes this API. If you are using organizations to manager your GuardDuty environment, this step is not \n needed. For more information, see Managing accounts with organizations.
\nTo invite Amazon Web Services accounts, the first step is \n to ensure that GuardDuty has been enabled in the potential member accounts. You can now invoke this API\n to add accounts by invitation. The \n invited accounts can either accept or decline the invitation from their GuardDuty accounts. Each invited Amazon Web Services account can \n choose to accept the invitation from only one Amazon Web Services account. For more information, see \n Managing GuardDuty accounts \n by invitation.
\nAfter the invite has been accepted and you choose to disassociate a member account \n (by using DisassociateMembers) from your account, \n the details of the member account obtained by invoking CreateMembers, including the \n associated email addresses, will be retained. \n This is done so that you can invoke InviteMembers without the need to invoke \n CreateMembers again. To \n remove the details associated with a member account, you must also invoke \n DeleteMembers.
", + "smithy.api#documentation": "Invites Amazon Web Services accounts to become members of an organization administered by the Amazon Web Services account \n that invokes this API. If you are using Amazon Web Services Organizations to manage your GuardDuty environment, this step is not\n needed. For more information, see Managing accounts with organizations.
\nTo invite Amazon Web Services accounts, the first step is \n to ensure that GuardDuty has been enabled in the potential member accounts. You can now invoke this API\n to add accounts by invitation. The \n invited accounts can either accept or decline the invitation from their GuardDuty accounts. Each invited Amazon Web Services account can \n choose to accept the invitation from only one Amazon Web Services account. For more information, see \n Managing GuardDuty accounts \n by invitation.
\nAfter the invite has been accepted and you choose to disassociate a member account \n (by using DisassociateMembers) from your account, \n the details of the member account obtained by invoking CreateMembers, including the \n associated email addresses, will be retained. \n This is done so that you can invoke InviteMembers without the need to invoke \n CreateMembers again. To \n remove the details associated with a member account, you must also invoke \n DeleteMembers.
", "smithy.api#http": { "method": "POST", "uri": "/detector/{DetectorId}/member/invite", @@ -8747,7 +8759,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the accounts configured as GuardDuty delegated administrators. \n Only the organization's management account can run this \n API operation.
", + "smithy.api#documentation": "Lists the accounts designated as GuardDuty delegated administrators. \n Only the organization's management account can run this \n API operation.
", "smithy.api#http": { "method": "GET", "uri": "/admin", @@ -8915,7 +8927,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists tags for a resource. Tagging is currently supported for detectors, finding filters,\n IP sets, threat intel sets, and publishing destination, with a limit of 50 tags per resource. \n When invoked, this\n operation returns all assigned tags for a given resource.
", + "smithy.api#documentation": "Lists tags for a resource. Tagging is currently supported for detectors, finding filters,\n IP sets, threat intel sets, and publishing destination, with a limit of 50 tags per resource. \n When invoked, this\n operation returns all assigned tags for a given resource.
", "smithy.api#http": { "method": "GET", "uri": "/tags/{ResourceArn}", @@ -9999,7 +10011,7 @@ "AutoEnable": { "target": "com.amazonaws.guardduty#OrgFeatureStatus", "traits": { - "smithy.api#documentation": "The status of the feature that will be configured for the organization. Use one of the following \n values to configure the feature status for the entire organization:
\n\n NEW
: Indicates that when a new account joins the organization, they will\n have the feature enabled automatically.
\n ALL
: Indicates that all accounts in the organization have the feature\n enabled automatically. This includes NEW
accounts that join the organization\n and accounts that may have been suspended or removed from the organization in\n GuardDuty.
It may take up to 24 hours to update the configuration for all the member accounts.
\n\n NONE
: Indicates that the feature will not be automatically enabled for any\n account in the organization. The administrator must manage the feature for each account \n individually.
Describes the status of the feature that is configured for the\n member accounts within the organization. One of the following \n values is the status for the entire organization:
\n\n NEW
: Indicates that when a new account joins the organization, they will\n have the feature enabled automatically.
\n ALL
: Indicates that all accounts in the organization have the feature\n enabled automatically. This includes NEW
accounts that join the organization\n and accounts that may have been suspended or removed from the organization in\n GuardDuty.
It may take up to 24 hours to update the configuration for all the member accounts.
\n\n NONE
: Indicates that the feature will not be \n automatically enabled for any\n account in the organization. The administrator must manage the feature\n for each account individually.
Represents key\n in the map condition.
", + "smithy.api#documentation": "Represents the key in the map condition.
", "smithy.api#jsonName": "key", "smithy.api#required": {} } @@ -11286,13 +11298,13 @@ "Value": { "target": "com.amazonaws.guardduty#TagValue", "traits": { - "smithy.api#documentation": "Represents optional value\n in the map\n condition. If not specified, only key\n will be\n matched.
", + "smithy.api#documentation": "Represents optional value in the map\n condition. If not specified, only the key will be\n matched.
", "smithy.api#jsonName": "value" } } }, "traits": { - "smithy.api#documentation": "Represents key, value pair to be matched against given resource property.
" + "smithy.api#documentation": "Represents the key:value
pair to be matched against given resource property.
Configures the delegated administrator account with the provided values. You must provide\n a value for either autoEnableOrganizationMembers
or autoEnable
, but not \n both.
There might be regional differences because some data sources might not be \n available in all the Amazon Web Services Regions where GuardDuty is presently supported. For more \n information, see Regions and endpoints.
", + "smithy.api#documentation": "Configures the delegated administrator account with the provided values. You must provide\n a value for either autoEnableOrganizationMembers
or autoEnable
, but not both.
There might be regional differences because some data sources might not be \n available in all the Amazon Web Services Regions where GuardDuty is presently supported. For more \n information, see Regions and endpoints.
", "smithy.api#http": { "method": "POST", "uri": "/detector/{DetectorId}/admin", @@ -13088,7 +13100,7 @@ "smithy.api#deprecated": { "message": "This field is deprecated, use AutoEnableOrganizationMembers instead" }, - "smithy.api#documentation": "Indicates whether to automatically enable member accounts in the organization.
\nEven though this is still supported, we recommend using\n AutoEnableOrganizationMembers
to achieve the similar results. You must provide the \n value for either autoEnableOrganizationMembers
or autoEnable
.
Represents whether or not to automatically enable member accounts in the organization.
\nEven though this is still supported, we recommend using\n AutoEnableOrganizationMembers
to achieve the similar results. You must provide a \n value for either autoEnableOrganizationMembers
or autoEnable
.
Indicates the auto-enablement configuration of GuardDuty for the member accounts in the\n organization. You must provide a \n value for either autoEnableOrganizationMembers
or autoEnable
.
Use one of the \n following configuration values for autoEnableOrganizationMembers
:
\n NEW
: Indicates that when a new account joins the organization, they will\n have GuardDuty enabled automatically.
\n ALL
: Indicates that all accounts in the organization have GuardDuty\n enabled automatically. This includes NEW
accounts that join the organization\n and accounts that may have been suspended or removed from the organization in\n GuardDuty.
It may take up to 24 hours to update the configuration for all the member accounts.
\n\n NONE
: Indicates that GuardDuty will not be automatically enabled for any\n account in the organization. The administrator must manage GuardDuty for each account in the organization\n individually.
Indicates the auto-enablement configuration of GuardDuty for the member accounts in the\n organization. You must provide a value for either autoEnableOrganizationMembers
or autoEnable
.
Use one of the \n following configuration values for autoEnableOrganizationMembers
:
\n NEW
: Indicates that when a new account joins the organization, they will\n have GuardDuty enabled automatically.
\n ALL
: Indicates that all accounts in the organization have GuardDuty\n enabled automatically. This includes NEW
accounts that join the organization\n and accounts that may have been suspended or removed from the organization in\n GuardDuty.
It may take up to 24 hours to update the configuration for all the member accounts.
\n\n NONE
: Indicates that GuardDuty will not be automatically enabled for any\n account in the organization. The administrator must manage GuardDuty for each account in the organization individually.
The criteria that determine if a device is behaving normally in regard to\n the metric
.
The criteria that determine if a device is behaving normally in regard to the metric
.
In the IoT console, you can choose to be sent an alert through Amazon SNS when IoT Device Defender detects that a device is behaving anomalously.
\nCreates an X.509 certificate using the specified certificate signing\n request.\n
\nRequires permission to access the CreateCertificateFromCsr action.\n
\nThe CSR must include a public key that is either an\n RSA key with a length of at least 2048 bits or an ECC key from NIST P-256 or NIST P-384 curves. \n For supported certificates, consult \n Certificate signing algorithms supported by IoT.\n
\nReusing the same certificate signing request (CSR)\n results in a distinct certificate.
\nYou can create multiple certificates in a batch by creating a directory, copying\n multiple .csr
files into that directory, and then specifying that directory on the command\n line. The following commands show how to create a batch of certificates given a batch of\n CSRs. In the following commands, we assume that a set of CSRs are located inside of the\n directory my-csr-directory:
On Linux and OS X, the command is:
\n\n $ ls my-csr-directory/ | xargs -I {} aws iot create-certificate-from-csr\n --certificate-signing-request file://my-csr-directory/{}
\n
This command lists all of the CSRs in my-csr-directory and pipes each CSR file name\n to the aws iot create-certificate-from-csr
Amazon Web Services CLI command to create a certificate for\n the corresponding CSR.
You can also run the aws iot create-certificate-from-csr
part of the\n command in parallel to speed up the certificate creation process:
\n $ ls my-csr-directory/ | xargs -P 10 -I {} aws iot create-certificate-from-csr\n --certificate-signing-request file://my-csr-directory/{}\n
\n
On Windows PowerShell, the command to create certificates for all CSRs in\n my-csr-directory is:
\n\n > ls -Name my-csr-directory | %{aws iot create-certificate-from-csr\n --certificate-signing-request file://my-csr-directory/$_}\n
\n
On a Windows command prompt, the command to create certificates for all CSRs in\n my-csr-directory is:
\n\n > forfiles /p my-csr-directory /c \"cmd /c aws iot create-certificate-from-csr\n --certificate-signing-request file://@path\"\n
\n
Creates an X.509 certificate using the specified certificate signing\n request.\n
\nRequires permission to access the CreateCertificateFromCsr action.\n
\nThe CSR must include a public key that is either an RSA key with a length of at least\n 2048 bits or an ECC key from NIST P-256, NIST P-384, or NIST P-521 curves. For supported\n certificates, consult Certificate signing algorithms supported by IoT.
\nReusing the same certificate signing request (CSR)\n results in a distinct certificate.
\nYou can create multiple certificates in a batch by creating a directory, copying\n multiple .csr
files into that directory, and then specifying that directory on the command\n line. The following commands show how to create a batch of certificates given a batch of\n CSRs. In the following commands, we assume that a set of CSRs are located inside of the\n directory my-csr-directory:
On Linux and OS X, the command is:
\n\n $ ls my-csr-directory/ | xargs -I {} aws iot create-certificate-from-csr\n --certificate-signing-request file://my-csr-directory/{}
\n
This command lists all of the CSRs in my-csr-directory and pipes each CSR file name\n to the aws iot create-certificate-from-csr
Amazon Web Services CLI command to create a certificate for\n the corresponding CSR.
You can also run the aws iot create-certificate-from-csr
part of the\n command in parallel to speed up the certificate creation process:
\n $ ls my-csr-directory/ | xargs -P 10 -I {} aws iot create-certificate-from-csr\n --certificate-signing-request file://my-csr-directory/{}\n
\n
On Windows PowerShell, the command to create certificates for all CSRs in\n my-csr-directory is:
\n\n > ls -Name my-csr-directory | %{aws iot create-certificate-from-csr\n --certificate-signing-request file://my-csr-directory/$_}\n
\n
On a Windows command prompt, the command to create certificates for all CSRs in\n my-csr-directory is:
\n\n > forfiles /p my-csr-directory /c \"cmd /c aws iot create-certificate-from-csr\n --certificate-signing-request file://@path\"\n
\n
An S3 link to the job document to use in the template. Required if you don't specify a value for document
.
If the job document resides in an S3 bucket, you must use a placeholder link when specifying the document.
\nThe placeholder link is of the following form:
\n\n ${aws:iot:s3-presigned-url:https://s3.amazonaws.com/bucket/key}
\n
where bucket is your bucket name and key is the object in the bucket to which you are linking.
\nAn S3 link, or S3 object URL, to the job document. The link is an Amazon S3 object URL and is required if you don't specify a value for document
.
For example, --document-source https://s3.region-code.amazonaws.com/example-firmware/device-firmware.1.0
\n
For more information, see Methods for accessing a bucket.
" } }, "document": { @@ -7705,7 +7705,7 @@ "additionalParameters": { "target": "com.amazonaws.iot#AdditionalParameterMap", "traits": { - "smithy.api#documentation": "A list of additional OTA update parameters which are name-value pairs.
" + "smithy.api#documentation": "A list of additional OTA update parameters, which are name-value pairs. \n They won't be sent to devices as a part of the Job document.
" } }, "tags": { @@ -7798,7 +7798,7 @@ "packageName": { "target": "com.amazonaws.iot#PackageName", "traits": { - "smithy.api#documentation": "The name of the new package.
", + "smithy.api#documentation": "The name of the new software package.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -7834,7 +7834,7 @@ "packageName": { "target": "com.amazonaws.iot#PackageName", "traits": { - "smithy.api#documentation": "The name of the package.
" + "smithy.api#documentation": "The name of the software package.
" } }, "packageArn": { @@ -7895,7 +7895,7 @@ "packageName": { "target": "com.amazonaws.iot#PackageName", "traits": { - "smithy.api#documentation": "The name of the associated package.
", + "smithy.api#documentation": "The name of the associated software package.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -7951,7 +7951,7 @@ "packageName": { "target": "com.amazonaws.iot#PackageName", "traits": { - "smithy.api#documentation": "The name of the associated package.
" + "smithy.api#documentation": "The name of the associated software package.
" } }, "versionName": { @@ -10560,7 +10560,7 @@ "packageName": { "target": "com.amazonaws.iot#PackageName", "traits": { - "smithy.api#documentation": "The name of the target package.
", + "smithy.api#documentation": "The name of the target software package.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -10605,7 +10605,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a specific version from a software package.
\n\n Note: If a package version is designated as default, you must remove the designation from the package using the UpdatePackage action.
", + "smithy.api#documentation": "Deletes a specific version from a software package.
\n\n Note: If a package version is designated as default, you must remove the designation from the software package using the UpdatePackage action.
", "smithy.api#http": { "method": "DELETE", "uri": "/packages/{packageName}/versions/{versionName}", @@ -10620,7 +10620,7 @@ "packageName": { "target": "com.amazonaws.iot#PackageName", "traits": { - "smithy.api#documentation": "The name of the associated package.
", + "smithy.api#documentation": "The name of the associated software package.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -12811,7 +12811,7 @@ "endpointType": { "target": "com.amazonaws.iot#EndpointType", "traits": { - "smithy.api#documentation": "The endpoint type. Valid endpoint types include:
\n\n iot:Data
- Returns a VeriSign signed data endpoint.
\n iot:Data-ATS
- Returns an ATS signed data endpoint.
\n iot:CredentialProvider
- Returns an IoT credentials provider API\n endpoint.
\n iot:Jobs
- Returns an IoT device management Jobs API\n endpoint.
We strongly recommend that customers use the newer iot:Data-ATS
endpoint type to avoid \n issues related to the widespread distrust of Symantec certificate authorities.
The endpoint type. Valid endpoint types include:
\n\n iot:Data
- Returns a VeriSign signed data endpoint.
\n iot:Data-ATS
- Returns an ATS signed data endpoint.
\n iot:CredentialProvider
- Returns an IoT credentials provider API\n endpoint.
\n iot:Jobs
- Returns an IoT device management Jobs API\n endpoint.
We strongly recommend that customers use the newer iot:Data-ATS
endpoint type to avoid \n issues related to the widespread distrust of Symantec certificate authorities. ATS Signed Certificates\n are more secure and are trusted by most popular browsers.
The name of the target package.
", + "smithy.api#documentation": "The name of the target software package.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -17158,7 +17158,7 @@ "packageName": { "target": "com.amazonaws.iot#PackageName", "traits": { - "smithy.api#documentation": "The name of the package.
" + "smithy.api#documentation": "The name of the software package.
" } }, "packageArn": { @@ -17263,7 +17263,7 @@ "packageName": { "target": "com.amazonaws.iot#PackageName", "traits": { - "smithy.api#documentation": "The name of the package.
" + "smithy.api#documentation": "The name of the software package.
" } }, "versionName": { @@ -19397,12 +19397,70 @@ "smithy.api#documentation": "Properties of the Apache Kafka producer client.
", "smithy.api#required": {} } + }, + "headers": { + "target": "com.amazonaws.iot#KafkaHeaders", + "traits": { + "smithy.api#documentation": "The list of Kafka headers that you specify.
" + } } }, "traits": { "smithy.api#documentation": "Send messages to an Amazon Managed Streaming for Apache Kafka (Amazon MSK) or self-managed Apache Kafka cluster.
" } }, + "com.amazonaws.iot#KafkaActionHeader": { + "type": "structure", + "members": { + "key": { + "target": "com.amazonaws.iot#KafkaHeaderKey", + "traits": { + "smithy.api#documentation": "The key of the Kafka header.
", + "smithy.api#required": {} + } + }, + "value": { + "target": "com.amazonaws.iot#KafkaHeaderValue", + "traits": { + "smithy.api#documentation": "The value of the Kafka header.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies a Kafka header using key-value pairs when you create a Rule’s Kafka Action.\n You can use these headers to route data from IoT clients to downstream Kafka clusters\n without modifying your message payload.
\nFor more information about Rule's Kafka action, see Apache Kafka.\n
" + } + }, + "com.amazonaws.iot#KafkaHeaderKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 16384 + } + } + }, + "com.amazonaws.iot#KafkaHeaderValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 16384 + } + } + }, + "com.amazonaws.iot#KafkaHeaders": { + "type": "list", + "member": { + "target": "com.amazonaws.iot#KafkaActionHeader" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, "com.amazonaws.iot#Key": { "type": "string" }, @@ -22243,7 +22301,7 @@ "packageName": { "target": "com.amazonaws.iot#PackageName", "traits": { - "smithy.api#documentation": "The name of the target package.
", + "smithy.api#documentation": "The name of the target software package.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -25234,6 +25292,18 @@ "traits": { "smithy.api#enumValue": "PRINCIPAL_ID" } + }, + "EVENT_TYPE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EVENT_TYPE" + } + }, + "DEVICE_DEFENDER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DEVICE_DEFENDER" + } } } }, @@ -26100,7 +26170,7 @@ "attributes": { "target": "com.amazonaws.iot#AttributesMap", "traits": { - "smithy.api#documentation": "A list of name/attribute pairs.
" + "smithy.api#documentation": "A list of name-attribute pairs. They won't be sent to devices as a part of the Job document.
" } } }, @@ -26445,7 +26515,7 @@ "packageName": { "target": "com.amazonaws.iot#PackageName", "traits": { - "smithy.api#documentation": "The name for the target package.
" + "smithy.api#documentation": "The name for the target software package.
" } }, "defaultVersionName": { @@ -28895,13 +28965,13 @@ "startTime": { "target": "com.amazonaws.iot#StringDateTime", "traits": { - "smithy.api#documentation": "The time a job will begin rollout of the job document to all devices in the target\n group for a job. The startTime
can be scheduled up to a year in advance and\n must be scheduled a minimum of thirty minutes from the current time. The date and time\n format for the startTime
is YYYY-MM-DD for the date and HH:MM for the\n time.
The time a job will begin rollout of the job document to all devices in the target\n group for a job. The startTime
can be scheduled up to a year in advance and\n must be scheduled a minimum of thirty minutes from the current time. The date and time\n format for the startTime
is YYYY-MM-DD for the date and HH:MM for the\n time.
For more information on the syntax for startTime
when using an API\n command or the Command Line Interface, see Timestamp.
The time a job will stop rollout of the job document to all devices in the target\n group for a job. The endTime
must take place no later than two years from\n the current time and be scheduled a minimum of thirty minutes from the current time. The\n minimum duration between startTime
and endTime
is thirty\n minutes. The maximum duration between startTime
and endTime
is\n two years. The date and time format for the endTime
is YYYY-MM-DD for the\n date and HH:MM for the time.
The time a job will stop rollout of the job document to all devices in the target\n group for a job. The endTime
must take place no later than two years from\n the current time and be scheduled a minimum of thirty minutes from the current time. The\n minimum duration between startTime
and endTime
is thirty\n minutes. The maximum duration between startTime
and endTime
is\n two years. The date and time format for the endTime
is YYYY-MM-DD for the\n date and HH:MM for the time.
For more information on the syntax for endTime
when using an API command\n or the Command Line Interface, see Timestamp.
Updates the supported fields for a specific package.
\nRequires permission to access the UpdatePackage and GetIndexingConfiguration actions.
", + "smithy.api#documentation": "Updates the supported fields for a specific software package.
\nRequires permission to access the UpdatePackage and GetIndexingConfiguration actions.
", "smithy.api#http": { "method": "PATCH", "uri": "/packages/{packageName}", @@ -33891,7 +33961,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates the package configuration.
\nRequires permission to access the UpdatePackageConfiguration and iam:PassRole actions.
", + "smithy.api#documentation": "Updates the software package configuration.
\nRequires permission to access the UpdatePackageConfiguration and iam:PassRole actions.
", "smithy.api#http": { "method": "PATCH", "uri": "/package-configuration", @@ -33935,7 +34005,7 @@ "packageName": { "target": "com.amazonaws.iot#PackageName", "traits": { - "smithy.api#documentation": "The name of the target package.
", + "smithy.api#documentation": "The name of the target software package.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -34038,7 +34108,7 @@ "attributes": { "target": "com.amazonaws.iot#ResourceAttributes", "traits": { - "smithy.api#documentation": "Metadata that can be used to define a package version’s configuration. For example, the S3 file location, configuration options that are being sent to the device or fleet.
\n\n Note: Attributes can be updated only when the package version\n is in a draft state.
\nThe combined size of all the attributes on a package version is limited to 3KB.
" + "smithy.api#documentation": "Metadata that can be used to define a package version’s configuration. For example, the Amazon S3 file location, configuration options that are being sent to the device or fleet.
\n\n Note: Attributes can be updated only when the package version\n is in a draft state.
\nThe combined size of all the attributes on a package version is limited to 3KB.
" } }, "action": { diff --git a/codegen/sdk/aws-models/kinesis-video-archived-media.json b/codegen/sdk/aws-models/kinesis-video-archived-media.json index c99b212a3da..b09c67c6e3c 100644 --- a/codegen/sdk/aws-models/kinesis-video-archived-media.json +++ b/codegen/sdk/aws-models/kinesis-video-archived-media.json @@ -1646,6 +1646,12 @@ "method": "POST", "uri": "/getImages", "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "Images", + "pageSize": "MaxResults" } } }, @@ -2126,6 +2132,12 @@ "method": "POST", "uri": "/listFragments", "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "Fragments", + "pageSize": "MaxResults" } } }, diff --git a/codegen/sdk/aws-models/kinesis-video.json b/codegen/sdk/aws-models/kinesis-video.json index 1edd9ca070c..d6fdbc2ee87 100644 --- a/codegen/sdk/aws-models/kinesis-video.json +++ b/codegen/sdk/aws-models/kinesis-video.json @@ -1014,7 +1014,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns the most current information about the channel. Specify the ChannelName
\n or ChannelARN
in the input.
This API is related to WebRTC Ingestion and is only available in the us-west-2
region.
Returns the most current information about the channel. Specify the ChannelName
\n or ChannelARN
in the input.
The time interval in milliseconds (ms) at which the images need to be generated from the stream. The minimum value that can be provided is 33 ms, \n because a camera that generates content at 30 FPS would create a frame every 33.3 ms. If the timestamp range is less than the sampling interval, the \n Image from the StartTimestamp
will be returned if available.
The time interval in milliseconds (ms) at which the images need to be generated from the stream. The minimum value that can be provided is 200 ms. If the timestamp range is less than the sampling interval, the \n Image from the StartTimestamp
will be returned if available.
An asynchronous API that updates a stream’s existing edge configuration. \n The Kinesis Video Stream will sync the stream’s edge configuration with the Edge Agent IoT Greengrass \n component that runs on an IoT Hub Device, setup at your premise. The time to sync can vary\n and depends on the connectivity of the Hub Device. \n The SyncStatus
will be updated as the edge configuration is acknowledged, \n and synced with the Edge Agent.
If this API is invoked for the first time, a new edge configuration will be created for the stream,\n and the sync status will be set to SYNCING
. You will have to wait for the sync status\n to reach a terminal state such as: IN_SYNC
, or SYNC_FAILED
, before using this API again. \n If you invoke this API during the syncing process, a ResourceInUseException
will be thrown. \n The connectivity of the stream’s edge configuration and the Edge Agent will be retried for 15 minutes. After 15 minutes,\n the status will transition into the SYNC_FAILED
state.
An asynchronous API that updates a stream’s existing edge configuration. \n The Kinesis Video Stream will sync the stream’s edge configuration with the Edge Agent IoT Greengrass \n component that runs on an IoT Hub Device, setup at your premise. The time to sync can vary\n and depends on the connectivity of the Hub Device. \n The SyncStatus
will be updated as the edge configuration is acknowledged, \n and synced with the Edge Agent.
If this API is invoked for the first time, a new edge configuration will be created for the stream,\n and the sync status will be set to SYNCING
. You will have to wait for the sync status\n to reach a terminal state such as: IN_SYNC
, or SYNC_FAILED
, before using this API again. \n If you invoke this API during the syncing process, a ResourceInUseException
will be thrown. \n The connectivity of the stream’s edge configuration and the Edge Agent will be retried for 15 minutes. After 15 minutes,\n the status will transition into the SYNC_FAILED
state.
To move an edge configuration from one device to another, use DeleteEdgeConfiguration to delete\n the current edge configuration. You can then invoke StartEdgeConfigurationUpdate with an updated Hub Device ARN.
", "smithy.api#http": { "method": "POST", "uri": "/startEdgeConfigurationUpdate", @@ -4533,7 +4533,7 @@ } ], "traits": { - "smithy.api#documentation": "Associates a SignalingChannel
to a stream to store the media. There are two signaling modes that \n can specified :
If the StorageStatus
is disabled, no data will be stored,\n and the StreamARN
parameter will not be needed.
If the StorageStatus
is enabled, the data will be stored in the \n StreamARN
provided.
If StorageStatus
is enabled, direct peer-to-peer (master-viewer) connections no\n longer occur. Peers connect directly to the storage session. You must call the\n JoinStorageSession
API to trigger an SDP offer send and establish a\n connection between a peer and the storage session.
This API is related to WebRTC Ingestion and is only available in the us-west-2
region.
Associates a SignalingChannel
to a stream to store the media. There are two signaling modes that \n can specified :
If the StorageStatus
is disabled, no data will be stored,\n and the StreamARN
parameter will not be needed.
If the StorageStatus
is enabled, the data will be stored in the \n StreamARN
provided.
If StorageStatus
is enabled, direct peer-to-peer (master-viewer) connections no\n longer occur. Peers connect directly to the storage session. You must call the\n JoinStorageSession
API to trigger an SDP offer send and establish a\n connection between a peer and the storage session.
Enforce Lake Formation permissions for the given databases, tables, and principals.
", + "smithy.api#http": { + "method": "POST", + "uri": "/CreateLakeFormationOptIn", + "code": 200 + } + } + }, + "com.amazonaws.lakeformation#CreateLakeFormationOptInRequest": { + "type": "structure", + "members": { + "Principal": { + "target": "com.amazonaws.lakeformation#DataLakePrincipal", + "traits": { + "smithy.api#required": {} + } + }, + "Resource": { + "target": "com.amazonaws.lakeformation#Resource", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.lakeformation#CreateLakeFormationOptInResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.lakeformation#CredentialTimeoutDurationSecondInteger": { "type": "integer", "traits": { @@ -2501,6 +2574,70 @@ "smithy.api#output": {} } }, + "com.amazonaws.lakeformation#DeleteLakeFormationOptIn": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#DeleteLakeFormationOptInRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#DeleteLakeFormationOptInResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#AccessDeniedException" + }, + { + "target": "com.amazonaws.lakeformation#ConcurrentModificationException" + }, + { + "target": "com.amazonaws.lakeformation#EntityNotFoundException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#OperationTimeoutException" + } + ], + "traits": { + "smithy.api#documentation": "Remove the Lake Formation permissions enforcement of the given databases, tables, and principals.
", + "smithy.api#http": { + "method": "POST", + "uri": "/DeleteLakeFormationOptIn", + "code": 200 + } + } + }, + "com.amazonaws.lakeformation#DeleteLakeFormationOptInRequest": { + "type": "structure", + "members": { + "Principal": { + "target": "com.amazonaws.lakeformation#DataLakePrincipal", + "traits": { + "smithy.api#required": {} + } + }, + "Resource": { + "target": "com.amazonaws.lakeformation#Resource", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.lakeformation#DeleteLakeFormationOptInResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.lakeformation#DeleteObjectInput": { "type": "structure", "members": { @@ -4331,7 +4468,7 @@ "TagValues": { "target": "com.amazonaws.lakeformation#TagValueList", "traits": { - "smithy.api#documentation": "A list of possible values an attribute can take.
", + "smithy.api#documentation": "A list of possible values an attribute can take.
\nThe maximum number of values that can be defined for a LF-Tag is 1000. A single API call\n supports 50 values. You can use multiple API calls to add more values.
", "smithy.api#required": {} } } @@ -4482,6 +4619,38 @@ } } }, + "com.amazonaws.lakeformation#LakeFormationOptInsInfo": { + "type": "structure", + "members": { + "Resource": { + "target": "com.amazonaws.lakeformation#Resource" + }, + "Principal": { + "target": "com.amazonaws.lakeformation#DataLakePrincipal" + }, + "LastModified": { + "target": "com.amazonaws.lakeformation#LastModifiedTimestamp", + "traits": { + "smithy.api#documentation": "The last modified date and time of the record.
" + } + }, + "LastUpdatedBy": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "The user who updated the record.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A single principal-resource pair that has Lake Formation permissins enforced.
" + } + }, + "com.amazonaws.lakeformation#LakeFormationOptInsInfoList": { + "type": "list", + "member": { + "target": "com.amazonaws.lakeformation#LakeFormationOptInsInfo" + } + }, "com.amazonaws.lakeformation#LastModifiedTimestamp": { "type": "timestamp" }, @@ -4660,6 +4829,91 @@ "smithy.api#output": {} } }, + "com.amazonaws.lakeformation#ListLakeFormationOptIns": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#ListLakeFormationOptInsRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#ListLakeFormationOptInsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#AccessDeniedException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#OperationTimeoutException" + } + ], + "traits": { + "smithy.api#documentation": "Retrieve the current list of resources and principals that are opt in to enforce Lake Formation permissions.
", + "smithy.api#http": { + "method": "POST", + "uri": "/ListLakeFormationOptIns", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.lakeformation#ListLakeFormationOptInsRequest": { + "type": "structure", + "members": { + "Principal": { + "target": "com.amazonaws.lakeformation#DataLakePrincipal" + }, + "Resource": { + "target": "com.amazonaws.lakeformation#Resource", + "traits": { + "smithy.api#documentation": "A structure for the resource.
" + } + }, + "MaxResults": { + "target": "com.amazonaws.lakeformation#PageSize", + "traits": { + "smithy.api#documentation": "The maximum number of results to return.
" + } + }, + "NextToken": { + "target": "com.amazonaws.lakeformation#Token", + "traits": { + "smithy.api#documentation": "A continuation token, if this is not the first call to retrieve this list.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.lakeformation#ListLakeFormationOptInsResponse": { + "type": "structure", + "members": { + "LakeFormationOptInsInfoList": { + "target": "com.amazonaws.lakeformation#LakeFormationOptInsInfoList", + "traits": { + "smithy.api#documentation": "A list of principal-resource pairs that have Lake Formation permissins enforced.
" + } + }, + "NextToken": { + "target": "com.amazonaws.lakeformation#Token", + "traits": { + "smithy.api#documentation": "A continuation token, if this is not the first call to retrieve this list.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.lakeformation#ListPermissions": { "type": "operation", "input": { @@ -5445,6 +5699,18 @@ "traits": { "smithy.api#documentation": "This attribute can be used to return any additional details of PrincipalResourcePermissions
. Currently returns only as a RAM resource share ARN.
The date and time when the resource was last updated.
" + } + }, + "LastUpdatedBy": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "The user who updated the record.
" + } } }, "traits": { @@ -5675,6 +5941,12 @@ "traits": { "smithy.api#documentation": "Whether or not the resource is a federated resource.
" } + }, + "HybridAccessEnabled": { + "target": "com.amazonaws.lakeformation#NullableBoolean", + "traits": { + "smithy.api#documentation": "\n Specifies whether the data access of tables pointing to the location can be managed by both Lake Formation permissions as well as Amazon S3 bucket policies.\n
" + } } }, "traits": { @@ -5855,6 +6127,12 @@ "traits": { "smithy.api#documentation": "Whether or not the resource is a federated resource.
" } + }, + "HybridAccessEnabled": { + "target": "com.amazonaws.lakeformation#NullableBoolean", + "traits": { + "smithy.api#documentation": "\n Indicates whether the data access of tables pointing to the location can be managed by both Lake Formation permissions as well as Amazon S3 bucket policies.\n
" + } } }, "traits": { @@ -7104,6 +7382,12 @@ "traits": { "smithy.api#documentation": "Whether or not the resource is a federated resource.
" } + }, + "HybridAccessEnabled": { + "target": "com.amazonaws.lakeformation#NullableBoolean", + "traits": { + "smithy.api#documentation": "\n Specifies whether the data access of tables pointing to the location can be managed by both Lake Formation permissions as well as Amazon S3 bucket policies.\n
" + } } }, "traits": { diff --git a/codegen/sdk/aws-models/macie2.json b/codegen/sdk/aws-models/macie2.json index a199bca8a3b..12c6f8e8643 100644 --- a/codegen/sdk/aws-models/macie2.json +++ b/codegen/sdk/aws-models/macie2.json @@ -1765,7 +1765,7 @@ "managedDataIdentifierSelector": { "target": "com.amazonaws.macie2#ManagedDataIdentifierSelector", "traits": { - "smithy.api#documentation": "The selection type to apply when determining which managed data identifiers the job uses to analyze data. Valid values are:
ALL (default) - Use all managed data identifiers. If you specify this value, don't specify any values for the managedDataIdentifierIds property.
EXCLUDE - Use all managed data identifiers except the ones specified by the managedDataIdentifierIds property.
INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property.
NONE - Don't use any managed data identifiers. If you specify this value, specify at least one custom data identifier for the job (customDataIdentifierIds) and don't specify any values for the managedDataIdentifierIds property.
RECOMMENDED - Use only the set of managed data identifiers that Amazon Web Services recommends for jobs. If you specify this value, don't specify any values for the managedDataIdentifierIds property.
If you don't specify a value for this property, the job uses all managed data identifiers.
If the job is a recurring job and you don't specify a value for this property or you specify ALL or EXCLUDE, each job run automatically uses new managed data identifiers that are released. If you specify RECOMMENDED for a recurring job, each job run automatically uses all the managed data identifiers that are in the recommended set when the job starts to run.
For information about individual managed data identifiers or to determine which ones are in the recommended set, see Using managed data identifiers and Recommended managed data identifiers in the Amazon Macie User Guide.
", + "smithy.api#documentation": "The selection type to apply when determining which managed data identifiers the job uses to analyze data. Valid values are:
ALL - Use all managed data identifiers. If you specify this value, don't specify any values for the managedDataIdentifierIds property.
EXCLUDE - Use all managed data identifiers except the ones specified by the managedDataIdentifierIds property.
INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property.
NONE - Don't use any managed data identifiers. If you specify this value, specify at least one value for the customDataIdentifierIds property and don't specify any values for the managedDataIdentifierIds property.
RECOMMENDED (default) - Use the recommended set of managed data identifiers. If you specify this value, don't specify any values for the managedDataIdentifierIds property.
If you don't specify a value for this property, the job uses the recommended set of managed data identifiers.
If the job is a recurring job and you specify ALL or EXCLUDE, each job run automatically uses new managed data identifiers that are released. If you specify RECOMMENDED for a recurring job, each job run automatically uses all the managed data identifiers that are in the recommended set when the run starts.
For information about individual managed data identifiers or to determine which ones are in the recommended set, see Using managed data identifiers and Recommended managed data identifiers in the Amazon Macie User Guide.
", "smithy.api#jsonName": "managedDataIdentifierSelector" } }, @@ -3331,7 +3331,7 @@ "managedDataIdentifierSelector": { "target": "com.amazonaws.macie2#ManagedDataIdentifierSelector", "traits": { - "smithy.api#documentation": "The selection type that determines which managed data identifiers the job uses when it analyzes data. Possible values are:
ALL (default) - Use all managed data identifiers.
EXCLUDE - Use all managed data identifiers except the ones specified by the managedDataIdentifierIds property.
INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property.
NONE - Don't use any managed data identifiers. Use only custom data identifiers (customDataIdentifierIds).
RECOMMENDED - Use only the set of managed data identifiers that Amazon Web Services recommends for jobs.
If this value is null, the job uses all managed data identifiers.
If the job is a recurring job and this value is null, ALL, or EXCLUDE, each job run automatically uses new managed data identifiers that are released after the job was created or the preceding run ended. If this value is RECOMMENDED for a recurring job, each job run uses all the managed data identifiers that are in the recommended set when the run starts.
For information about individual managed data identifiers or to determine which ones are in the recommended set, see Using managed data identifiers and Recommended managed data identifiers in the Amazon Macie User Guide.
", + "smithy.api#documentation": "The selection type that determines which managed data identifiers the job uses when it analyzes data. Possible values are:
ALL - Use all managed data identifiers.
EXCLUDE - Use all managed data identifiers except the ones specified by the managedDataIdentifierIds property.
INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property.
NONE - Don't use any managed data identifiers. Use only custom data identifiers (customDataIdentifierIds).
RECOMMENDED (default) - Use the recommended set of managed data identifiers.
If this value is null, the job uses the recommended set of managed data identifiers.
If the job is a recurring job and this value is ALL or EXCLUDE, each job run automatically uses new managed data identifiers that are released. If this value is null or RECOMMENDED for a recurring job, each job run uses all the managed data identifiers that are in the recommended set when the run starts.
For information about individual managed data identifiers or to determine which ones are in the recommended set, see Using managed data identifiers and Recommended managed data identifiers in the Amazon Macie User Guide.
", "smithy.api#jsonName": "managedDataIdentifierSelector" } }, diff --git a/codegen/sdk/aws-models/mediaconvert.json b/codegen/sdk/aws-models/mediaconvert.json index 9438c477c8a..8fb818f3433 100644 --- a/codegen/sdk/aws-models/mediaconvert.json +++ b/codegen/sdk/aws-models/mediaconvert.json @@ -4464,10 +4464,16 @@ "traits": { "smithy.api#enumValue": "ALTERNATE_AUDIO_NOT_AUTO_SELECT" } + }, + "AUDIO_ONLY_VARIANT_STREAM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AUDIO_ONLY_VARIANT_STREAM" + } } }, "traits": { - "smithy.api#documentation": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. Choose Alternate audio, auto select, not default to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. When there is more than one variant in your output group, you must explicitly choose a value for this setting." + "smithy.api#documentation": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. Choose Audio-only variant stream (AUDIO_ONLY_VARIANT_STREAM) for any variant that you want to prohibit the client from playing with video. 
This causes MediaConvert to represent the variant as an EXT-X-STREAM-INF in the HLS manifest. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. Choose Alternate audio, auto select, not default to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. When there is more than one variant in your output group, you must explicitly choose a value for this setting." } }, "com.amazonaws.mediaconvert#CmfcDescriptiveVideoServiceFlag": { @@ -4617,7 +4623,7 @@ "AudioTrackType": { "target": "com.amazonaws.mediaconvert#CmfcAudioTrackType", "traits": { - "smithy.api#documentation": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. 
Choose Alternate audio, auto select, not default to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. When there is more than one variant in your output group, you must explicitly choose a value for this setting.", + "smithy.api#documentation": "Use this setting to control the values that MediaConvert puts in your HLS parent playlist to control how the client player selects which audio track to play. Choose Audio-only variant stream (AUDIO_ONLY_VARIANT_STREAM) for any variant that you want to prohibit the client from playing with video. This causes MediaConvert to represent the variant as an EXT-X-STREAM-INF in the HLS manifest. The other options for this setting determine the values that MediaConvert writes for the DEFAULT and AUTOSELECT attributes of the EXT-X-MEDIA entry for the audio variant. For more information about these attributes, see the Apple documentation article https://developer.apple.com/documentation/http_live_streaming/example_playlists_for_http_live_streaming/adding_alternate_media_to_a_playlist. Choose Alternate audio, auto select, default to set DEFAULT=YES and AUTOSELECT=YES. Choose this value for only one variant in your output group. Choose Alternate audio, auto select, not default to set DEFAULT=NO and AUTOSELECT=YES. Choose Alternate Audio, Not Auto Select to set DEFAULT=NO and AUTOSELECT=NO. When you don't specify a value for this setting, MediaConvert defaults to Alternate audio, auto select, default. 
When there is more than one variant in your output group, you must explicitly choose a value for this setting.", "smithy.api#jsonName": "audioTrackType" } }, @@ -13649,6 +13655,13 @@ "smithy.api#jsonName": "extendedDataServices" } }, + "FollowInputIndex": { + "target": "com.amazonaws.mediaconvert#__integerMin0Max149", + "traits": { + "smithy.api#documentation": "Specifies which input metadata to use for the default \"Follow input\" option for the following settings: resolution, frame rate, and pixel aspect ratio. In the simplest case, specify which input is used based on its index in the job. For example if you specify 3, then the fourth input will be used from each input. If the job does not have a fourth input, then the first input will be used. If no followInputIndex is specified, then 0 will be chosen automatically.", + "smithy.api#jsonName": "followInputIndex" + } + }, "Inputs": { "target": "com.amazonaws.mediaconvert#__listOfInput", "traits": { @@ -13906,6 +13919,13 @@ "smithy.api#jsonName": "extendedDataServices" } }, + "FollowInputIndex": { + "target": "com.amazonaws.mediaconvert#__integerMin0Max149", + "traits": { + "smithy.api#documentation": "Specifies which input metadata to use for the default \"Follow input\" option for the following settings: resolution, frame rate, and pixel aspect ratio. In the simplest case, specify which input is used based on its index in the job. For example if you specify 3, then the fourth input will be used from each input. If the job does not have a fourth input, then the first input will be used. 
If no followInputIndex is specified, then 0 will be chosen automatically.", + "smithy.api#jsonName": "followInputIndex" + } + }, "Inputs": { "target": "com.amazonaws.mediaconvert#__listOfInputTemplate", "traits": { @@ -21336,7 +21356,7 @@ "StorageClass": { "target": "com.amazonaws.mediaconvert#S3StorageClass", "traits": { - "smithy.api#documentation": "Specify the S3 storage class to use for this destination.", + "smithy.api#documentation": "Specify the S3 storage class to use for this output. To use your destination's default storage class: Keep the default value, Not set. For more information about S3 storage classes, see https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html", "smithy.api#jsonName": "storageClass" } } @@ -21473,7 +21493,7 @@ } }, "traits": { - "smithy.api#documentation": "Specify the S3 storage class to use for this destination." + "smithy.api#documentation": "Specify the S3 storage class to use for this output. To use your destination's default storage class: Keep the default value, Not set. 
For more information about S3 storage classes, see https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html" } }, "com.amazonaws.mediaconvert#SampleRangeConversion": { @@ -21620,7 +21640,7 @@ } }, "Url": { - "target": "com.amazonaws.mediaconvert#__stringPatternHttps", + "target": "com.amazonaws.mediaconvert#__stringPatternHttpsD", "traits": { "smithy.api#documentation": "Specify the URL to the key server that your SPEKE-compliant DRM key provider uses to provide keys for encrypting your content.", "smithy.api#jsonName": "url" @@ -21663,7 +21683,7 @@ } }, "Url": { - "target": "com.amazonaws.mediaconvert#__stringPatternHttps", + "target": "com.amazonaws.mediaconvert#__stringPatternHttpsD", "traits": { "smithy.api#documentation": "Specify the URL to the key server that your SPEKE-compliant DRM key provider uses to provide keys for encrypting your content.", "smithy.api#jsonName": "url" @@ -24960,6 +24980,15 @@ } } }, + "com.amazonaws.mediaconvert#__integerMin0Max149": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 149 + } + } + }, "com.amazonaws.mediaconvert#__integerMin0Max15": { "type": "integer", "traits": { @@ -26343,6 +26372,12 @@ "smithy.api#pattern": "^https:\\/\\/$" } }, + "com.amazonaws.mediaconvert#__stringPatternHttpsD": { + "type": "string", + "traits": { + "smithy.api#pattern": "^https:\\/\\/[^:@\\/]*(:\\d*)?(\\/.*)?$" + } + }, "com.amazonaws.mediaconvert#__stringPatternHttpsKantarmedia": { "type": "string", "traits": { diff --git a/codegen/sdk/aws-models/outposts.json b/codegen/sdk/aws-models/outposts.json index cbfac2153db..7b46dba588c 100644 --- a/codegen/sdk/aws-models/outposts.json +++ b/codegen/sdk/aws-models/outposts.json @@ -129,7 +129,7 @@ } }, "traits": { - "smithy.api#documentation": "\n Information about an address.\n
" + "smithy.api#documentation": "Information about an address.
" } }, "com.amazonaws.outposts#AddressLine1": { @@ -205,36 +205,36 @@ "AssetId": { "target": "com.amazonaws.outposts#AssetId", "traits": { - "smithy.api#documentation": "\n The ID of the asset.\n
" + "smithy.api#documentation": "The ID of the asset.
" } }, "RackId": { "target": "com.amazonaws.outposts#RackId", "traits": { - "smithy.api#documentation": "\n The rack ID of the asset. \n
" + "smithy.api#documentation": "The rack ID of the asset.
" } }, "AssetType": { "target": "com.amazonaws.outposts#AssetType", "traits": { - "smithy.api#documentation": "\n The type of the asset.\n
" + "smithy.api#documentation": "The type of the asset.
" } }, "ComputeAttributes": { "target": "com.amazonaws.outposts#ComputeAttributes", "traits": { - "smithy.api#documentation": "\n Information about compute hardware assets. \n
" + "smithy.api#documentation": "Information about compute hardware assets.
" } }, "AssetLocation": { "target": "com.amazonaws.outposts#AssetLocation", "traits": { - "smithy.api#documentation": "\n The position of an asset in a rack. \n
" + "smithy.api#documentation": "The position of an asset in a rack.
" } } }, "traits": { - "smithy.api#documentation": "\n Information about hardware assets.\n
" + "smithy.api#documentation": "Information about hardware assets.
" } }, "com.amazonaws.outposts#AssetListDefinition": { @@ -249,12 +249,12 @@ "RackElevation": { "target": "com.amazonaws.outposts#RackElevation", "traits": { - "smithy.api#documentation": "\n The position of an asset in a rack measured in rack units.\n
" + "smithy.api#documentation": "The position of an asset in a rack measured in rack units.
" } } }, "traits": { - "smithy.api#documentation": "\n Information about the position of the asset in a rack. \n
" + "smithy.api#documentation": "Information about the position of the asset in a rack.
" } }, "com.amazonaws.outposts#AssetState": { @@ -393,7 +393,7 @@ "OrderId": { "target": "com.amazonaws.outposts#OrderId", "traits": { - "smithy.api#documentation": "\n The ID of the order.\n
", + "smithy.api#documentation": "The ID of the order.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -416,25 +416,25 @@ "CatalogItemId": { "target": "com.amazonaws.outposts#SkuCode", "traits": { - "smithy.api#documentation": "\n The ID of the catalog item.\n
" + "smithy.api#documentation": "The ID of the catalog item.
" } }, "ItemStatus": { "target": "com.amazonaws.outposts#CatalogItemStatus", "traits": { - "smithy.api#documentation": "\n The status of a catalog item.\n
" + "smithy.api#documentation": "The status of a catalog item.
" } }, "EC2Capacities": { "target": "com.amazonaws.outposts#EC2CapacityListDefinition", "traits": { - "smithy.api#documentation": "\n Information about the EC2 capacity of an item.\n
" + "smithy.api#documentation": "Information about the EC2 capacity of an item.
" } }, "PowerKva": { "target": "com.amazonaws.outposts#CatalogItemPowerKva", "traits": { - "smithy.api#documentation": "\n Information about the power draw of an item.\n
" + "smithy.api#documentation": "Information about the power draw of an item.
" } }, "WeightLbs": { @@ -446,13 +446,13 @@ "SupportedUplinkGbps": { "target": "com.amazonaws.outposts#SupportedUplinkGbpsListDefinition", "traits": { - "smithy.api#documentation": "\n The uplink speed this catalog item requires for the \n connection to the Region.\n
" + "smithy.api#documentation": "The uplink speed this catalog item requires for the connection to the Region.
" } }, "SupportedStorage": { "target": "com.amazonaws.outposts#SupportedStorageList", "traits": { - "smithy.api#documentation": "\n The supported storage options for the catalog item.\n
" + "smithy.api#documentation": "The supported storage options for the catalog item.
" } } }, @@ -557,18 +557,24 @@ "HostId": { "target": "com.amazonaws.outposts#HostId", "traits": { - "smithy.api#documentation": "\n The host ID of the Dedicated Host on the asset.\n
" + "smithy.api#documentation": "The host ID of the Dedicated Host on the asset.
" } }, "State": { "target": "com.amazonaws.outposts#ComputeAssetState", "traits": { - "smithy.api#documentation": "The state.
\nACTIVE - The asset is available and can provide capacity for new compute resources.
\nISOLATED - The asset is undergoing maintenance and can't provide capacity for new compute resources.\n Existing compute resources on the asset are not affected.
\nRETIRING - The underlying hardware for the asset is degraded. Capacity for new compute resources is reduced.\n Amazon Web Services sends notifications for resources that must be stopped before the asset can be replaced.
\nThe state.
\nACTIVE - The asset is available and can provide capacity for new compute\n resources.
\nISOLATED - The asset is undergoing maintenance and can't provide capacity for new\n compute resources. Existing compute resources on the asset are not affected.
\nRETIRING - The underlying hardware for the asset is degraded. Capacity for new compute\n resources is reduced. Amazon Web Services sends notifications for resources that must be stopped before\n the asset can be replaced.
\nA list of the names of instance families that are currently associated with a given\n asset.
" } } }, "traits": { - "smithy.api#documentation": "\n Information about compute hardware assets.\n
" + "smithy.api#documentation": "Information about compute hardware assets.
" } }, "com.amazonaws.outposts#ConflictException": { @@ -602,42 +608,42 @@ "ClientPublicKey": { "target": "com.amazonaws.outposts#WireGuardPublicKey", "traits": { - "smithy.api#documentation": "\n The public key of the client.\n
" + "smithy.api#documentation": "The public key of the client.
" } }, "ServerPublicKey": { "target": "com.amazonaws.outposts#WireGuardPublicKey", "traits": { - "smithy.api#documentation": "\n The public key of the server.\n
" + "smithy.api#documentation": "The public key of the server.
" } }, "ServerEndpoint": { "target": "com.amazonaws.outposts#ServerEndpoint", "traits": { - "smithy.api#documentation": "\n The endpoint for the server.\n
" + "smithy.api#documentation": "The endpoint for the server.
" } }, "ClientTunnelAddress": { "target": "com.amazonaws.outposts#CIDR", "traits": { - "smithy.api#documentation": "\n The client tunnel address. \n
" + "smithy.api#documentation": "The client tunnel address.
" } }, "ServerTunnelAddress": { "target": "com.amazonaws.outposts#CIDR", "traits": { - "smithy.api#documentation": "\n The server tunnel address.\n
" + "smithy.api#documentation": "The server tunnel address.
" } }, "AllowedIps": { "target": "com.amazonaws.outposts#CIDRList", "traits": { - "smithy.api#documentation": "\n The allowed IP addresses.\n
" + "smithy.api#documentation": "The allowed IP addresses.
" } } }, "traits": { - "smithy.api#documentation": "\n Information about a connection. \n
" + "smithy.api#documentation": "Information about a connection.
" } }, "com.amazonaws.outposts#ConnectionId": { @@ -729,7 +735,7 @@ "OutpostIdentifier": { "target": "com.amazonaws.outposts#OutpostIdentifier", "traits": { - "smithy.api#documentation": "\n The ID or the Amazon Resource Name (ARN) of the Outpost.\n
", + "smithy.api#documentation": "The ID or the Amazon Resource Name (ARN) of the Outpost.
", "smithy.api#required": {} } }, @@ -824,7 +830,7 @@ "SiteId": { "target": "com.amazonaws.outposts#SiteId", "traits": { - "smithy.api#documentation": "\n The ID or the Amazon Resource Name (ARN) of the site.\n
", + "smithy.api#documentation": "The ID or the Amazon Resource Name (ARN) of the site.
", "smithy.api#required": {} } }, @@ -843,7 +849,7 @@ "SupportedHardwareType": { "target": "com.amazonaws.outposts#SupportedHardwareType", "traits": { - "smithy.api#documentation": "\n The type of hardware for this Outpost.\n
" + "smithy.api#documentation": "The type of hardware for this Outpost.
" } } }, @@ -888,7 +894,7 @@ } ], "traits": { - "smithy.api#documentation": "\n Creates a site for an Outpost.\n
", + "smithy.api#documentation": "Creates a site for an Outpost.
", "smithy.api#http": { "method": "POST", "uri": "/sites", @@ -917,19 +923,19 @@ "Tags": { "target": "com.amazonaws.outposts#TagMap", "traits": { - "smithy.api#documentation": "\n The tags to apply to a site.\n
" + "smithy.api#documentation": "The tags to apply to a site.
" } }, "OperatingAddress": { "target": "com.amazonaws.outposts#Address", "traits": { - "smithy.api#documentation": "\n The location to install and power on the hardware. This address might be \n different from the shipping address.\n
" + "smithy.api#documentation": "The location to install and power on the hardware. This address might be different from\n the shipping address.
" } }, "ShippingAddress": { "target": "com.amazonaws.outposts#Address", "traits": { - "smithy.api#documentation": "\n The location to ship the hardware. This address might be different \n from the operating address.\n
" + "smithy.api#documentation": "The location to ship the hardware. This address might be different from the operating\n address.
" } }, "RackPhysicalProperties": { @@ -994,7 +1000,7 @@ "OutpostId": { "target": "com.amazonaws.outposts#OutpostId", "traits": { - "smithy.api#documentation": "\n The ID or the Amazon Resource Name (ARN) of the Outpost.\n
", + "smithy.api#documentation": "The ID or the Amazon Resource Name (ARN) of the Outpost.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1051,7 +1057,7 @@ "SiteId": { "target": "com.amazonaws.outposts#SiteId", "traits": { - "smithy.api#documentation": "\n The ID or the Amazon Resource Name (ARN) of the site.\n
", + "smithy.api#documentation": "The ID or the Amazon Resource Name (ARN) of the site.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1094,19 +1100,19 @@ "Family": { "target": "com.amazonaws.outposts#Family", "traits": { - "smithy.api#documentation": "\n The family of the EC2 capacity.\n
" + "smithy.api#documentation": "The family of the EC2 capacity.
" } }, "MaxSize": { "target": "com.amazonaws.outposts#MaxSize", "traits": { - "smithy.api#documentation": "\n The maximum size of the EC2 capacity.\n
" + "smithy.api#documentation": "The maximum size of the EC2 capacity.
" } }, "Quantity": { "target": "com.amazonaws.outposts#Quantity", "traits": { - "smithy.api#documentation": "\n The quantity of the EC2 capacity.\n
" + "smithy.api#documentation": "The quantity of the EC2 capacity.
" } } }, @@ -1244,7 +1250,7 @@ } ], "traits": { - "smithy.api#documentation": "\n Amazon Web Services uses this action to install Outpost servers.
\n\n Gets information about the specified connection.\n
\n\n Use CloudTrail to monitor this action or Amazon Web Services managed policy for Amazon Web Services Outposts to secure it. For \n more information, see \n Amazon Web Services managed policies for Amazon Web Services Outposts and \n Logging Amazon Web Services Outposts API calls with Amazon Web Services CloudTrail in the Amazon Web Services Outposts User Guide.\n
", + "smithy.api#documentation": "\n Amazon Web Services uses this action to install Outpost servers.
\nGets information about the specified connection.
\n\n Use CloudTrail to monitor this action or Amazon Web Services managed policy for Amazon Web Services Outposts to secure it. For \n more information, see \n Amazon Web Services managed policies for Amazon Web Services Outposts and \n Logging Amazon Web Services Outposts API calls with Amazon Web Services CloudTrail in the Amazon Web Services Outposts User Guide.\n
", "smithy.api#http": { "method": "GET", "uri": "/connections/{ConnectionId}", @@ -1258,7 +1264,7 @@ "ConnectionId": { "target": "com.amazonaws.outposts#ConnectionId", "traits": { - "smithy.api#documentation": "\n The ID of the connection.\n
", + "smithy.api#documentation": "The ID of the connection.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1274,13 +1280,13 @@ "ConnectionId": { "target": "com.amazonaws.outposts#ConnectionId", "traits": { - "smithy.api#documentation": "\n The ID of the connection.\n
" + "smithy.api#documentation": "The ID of the connection.
" } }, "ConnectionDetails": { "target": "com.amazonaws.outposts#ConnectionDetails", "traits": { - "smithy.api#documentation": "\n Information about the connection.\n
" + "smithy.api#documentation": "Information about the connection.
" } } }, @@ -1380,7 +1386,7 @@ "OutpostId": { "target": "com.amazonaws.outposts#OutpostId", "traits": { - "smithy.api#documentation": "\n The ID or the Amazon Resource Name (ARN) of the Outpost.\n
", + "smithy.api#documentation": "The ID or the Amazon Resource Name (ARN) of the Outpost.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1433,7 +1439,7 @@ "OutpostId": { "target": "com.amazonaws.outposts#OutpostId", "traits": { - "smithy.api#documentation": "\n The ID or the Amazon Resource Name (ARN) of the Outpost.\n
", + "smithy.api#documentation": "The ID or the Amazon Resource Name (ARN) of the Outpost.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1467,7 +1473,7 @@ "OutpostId": { "target": "com.amazonaws.outposts#OutpostId", "traits": { - "smithy.api#documentation": "\n The ID of the Outpost.\n
" + "smithy.api#documentation": "The ID of the Outpost.
" } }, "OutpostArn": { @@ -1543,7 +1549,7 @@ } ], "traits": { - "smithy.api#documentation": "\n Gets the site address of the specified site.\n
", + "smithy.api#documentation": "Gets the site address of the specified site.
", "smithy.api#http": { "method": "GET", "uri": "/sites/{SiteId}/address", @@ -1557,7 +1563,7 @@ "SiteId": { "target": "com.amazonaws.outposts#SiteId", "traits": { - "smithy.api#documentation": "\n The ID or the Amazon Resource Name (ARN) of the site.\n
", + "smithy.api#documentation": "The ID or the Amazon Resource Name (ARN) of the site.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1590,7 +1596,7 @@ "Address": { "target": "com.amazonaws.outposts#Address", "traits": { - "smithy.api#documentation": "\n Information about the address.\n
" + "smithy.api#documentation": "Information about the address.
" } } }, @@ -1604,7 +1610,7 @@ "SiteId": { "target": "com.amazonaws.outposts#SiteId", "traits": { - "smithy.api#documentation": "\n The ID or the Amazon Resource Name (ARN) of the site.\n
", + "smithy.api#documentation": "The ID or the Amazon Resource Name (ARN) of the site.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1644,6 +1650,22 @@ "com.amazonaws.outposts#ISO8601Timestamp": { "type": "timestamp" }, + "com.amazonaws.outposts#InstanceFamilies": { + "type": "list", + "member": { + "target": "com.amazonaws.outposts#InstanceFamilyName" + } + }, + "com.amazonaws.outposts#InstanceFamilyName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 200 + }, + "smithy.api#pattern": "^(?:.{1,200}/)?(?:[a-z0-9-_A-Z])+$" + } + }, "com.amazonaws.outposts#InstanceType": { "type": "string", "traits": { @@ -1712,7 +1734,7 @@ "CatalogItemId": { "target": "com.amazonaws.outposts#SkuCode", "traits": { - "smithy.api#documentation": "\n The ID of the catalog item.\n
" + "smithy.api#documentation": "The ID of the catalog item.
" } }, "LineItemId": { @@ -1737,13 +1759,13 @@ "ShipmentInformation": { "target": "com.amazonaws.outposts#ShipmentInformation", "traits": { - "smithy.api#documentation": "\n Information about a line item shipment.\n
" + "smithy.api#documentation": "Information about a line item shipment.
" } }, "AssetInformationList": { "target": "com.amazonaws.outposts#LineItemAssetInformationList", "traits": { - "smithy.api#documentation": "\n Information about assets.\n
" + "smithy.api#documentation": "Information about assets.
" } }, "PreviousLineItemId": { @@ -1755,7 +1777,7 @@ "PreviousOrderId": { "target": "com.amazonaws.outposts#OrderId", "traits": { - "smithy.api#documentation": "The ID of the previous order item.
" + "smithy.api#documentation": "The ID of the previous order.
" } } }, @@ -1769,18 +1791,18 @@ "AssetId": { "target": "com.amazonaws.outposts#AssetId", "traits": { - "smithy.api#documentation": "\n The ID of the asset.\n
" + "smithy.api#documentation": "The ID of the asset.
" } }, "MacAddressList": { "target": "com.amazonaws.outposts#MacAddressList", "traits": { - "smithy.api#documentation": "\n The MAC addresses of the asset.\n
" + "smithy.api#documentation": "The MAC addresses of the asset.
" } } }, "traits": { - "smithy.api#documentation": "\n Information about a line item asset.\n
" + "smithy.api#documentation": "Information about a line item asset.
" } }, "com.amazonaws.outposts#LineItemAssetInformationList": { @@ -1955,7 +1977,7 @@ "OutpostIdentifier": { "target": "com.amazonaws.outposts#OutpostIdentifier", "traits": { - "smithy.api#documentation": "\n The ID or the Amazon Resource Name (ARN) of the Outpost.\n
", + "smithy.api#documentation": "The ID or the Amazon Resource Name (ARN) of the Outpost.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2143,7 +2165,7 @@ "OutpostIdentifierFilter": { "target": "com.amazonaws.outposts#OutpostIdentifier", "traits": { - "smithy.api#documentation": "\n The ID or the Amazon Resource Name (ARN) of the Outpost.\n
", + "smithy.api#documentation": "The ID or the Amazon Resource Name (ARN) of the Outpost.
", "smithy.api#httpQuery": "OutpostIdentifierFilter" } }, @@ -2170,7 +2192,7 @@ "Orders": { "target": "com.amazonaws.outposts#OrderSummaryListDefinition", "traits": { - "smithy.api#documentation": "\n Information about the orders. \n
" + "smithy.api#documentation": "Information about the orders.
" } }, "NextToken": { @@ -2290,7 +2312,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the Outpost sites for your Amazon Web Services account. Use filters to return specific results.
\nUse filters to return specific results. If you specify multiple filters, the results include only the resources that match \n all of the specified filters. For a filter where you can specify multiple values, the results include \n items that match any of the values that you specify for the filter.
", + "smithy.api#documentation": "Lists the Outpost sites for your Amazon Web Services account. Use filters to return specific\n results.
\nUse filters to return specific results. If you specify multiple filters, the results include only the resources that match \n all of the specified filters. For a filter where you can specify multiple values, the results include \n items that match any of the values that you specify for the filter.
", "smithy.api#http": { "method": "GET", "uri": "/sites", @@ -2603,7 +2625,7 @@ "OutpostId": { "target": "com.amazonaws.outposts#OutpostIdOnly", "traits": { - "smithy.api#documentation": "\n The ID of the Outpost in the order.\n
" + "smithy.api#documentation": "The ID of the Outpost in the order.
" } }, "OrderId": { @@ -2615,7 +2637,7 @@ "Status": { "target": "com.amazonaws.outposts#OrderStatus", "traits": { - "smithy.api#documentation": "The status of the order.
\n\n PREPARING
- Order is received and being prepared.
\n IN_PROGRESS
- Order is either being built, shipped, or installed. To get more\n details, see the line item status.
\n COMPLETED
- Order is complete.
\n CANCELLED
- Order is cancelled.
\n ERROR
- Customer should contact support.
The following status are deprecated: RECEIVED
, PENDING
,\n PROCESSING
, INSTALLING
, and FULFILLED
.
The status of the order.
\n\n PREPARING
- Order is received and being prepared.
\n IN_PROGRESS
- Order is either being built, shipped, or installed. To get\n more details, see the line item status.
\n COMPLETED
- Order is complete.
\n CANCELLED
- Order is cancelled.
\n ERROR
- Customer should contact support.
The following status are deprecated: RECEIVED
, PENDING
,\n PROCESSING
, INSTALLING
, and FULFILLED
.
Type of order.
" + "smithy.api#documentation": "The type of order.
" } } }, @@ -2740,48 +2762,48 @@ "OutpostId": { "target": "com.amazonaws.outposts#OutpostIdOnly", "traits": { - "smithy.api#documentation": "\n The ID of the Outpost.\n
" + "smithy.api#documentation": "The ID of the Outpost.
" } }, "OrderId": { "target": "com.amazonaws.outposts#OrderId", "traits": { - "smithy.api#documentation": "\n The ID of the order.\n
" + "smithy.api#documentation": "The ID of the order.
" } }, "OrderType": { "target": "com.amazonaws.outposts#OrderType", "traits": { - "smithy.api#documentation": "\n The type of order. \n
" + "smithy.api#documentation": "The type of order.
" } }, "Status": { "target": "com.amazonaws.outposts#OrderStatus", "traits": { - "smithy.api#documentation": "The status of the order.
\n\n PREPARING
- Order is received and is being prepared.
\n IN_PROGRESS
- Order is either being built, shipped, or installed. For more\n information, see the LineItem
status.
\n COMPLETED
- Order is complete.
\n CANCELLED
- Order is cancelled.
\n ERROR
- Customer should contact support.
The following statuses are deprecated: RECEIVED
, PENDING
,\n PROCESSING
, INSTALLING
, and FULFILLED
.
The status of the order.
\n\n PREPARING
- Order is received and is being prepared.
\n IN_PROGRESS
- Order is either being built, shipped, or installed. For\n more information, see the LineItem
status.
\n COMPLETED
- Order is complete.
\n CANCELLED
- Order is cancelled.
\n ERROR
- Customer should contact support.
The following statuses are deprecated: RECEIVED
, PENDING
,\n PROCESSING
, INSTALLING
, and FULFILLED
.
\n The status of all line items in the order. \n
" + "smithy.api#documentation": "The status of all line items in the order.
" } }, "OrderSubmissionDate": { "target": "com.amazonaws.outposts#ISO8601Timestamp", "traits": { - "smithy.api#documentation": "\n The submission date for the order. \n
" + "smithy.api#documentation": "The submission date for the order.
" } }, "OrderFulfilledDate": { "target": "com.amazonaws.outposts#ISO8601Timestamp", "traits": { - "smithy.api#documentation": "\n The fulfilment date for the order.\n
" + "smithy.api#documentation": "The fulfilment date for the order.
" } } }, "traits": { - "smithy.api#documentation": "\n A summary of line items in your order.\n
" + "smithy.api#documentation": "A summary of line items in your order.
" } }, "com.amazonaws.outposts#OrderSummaryListDefinition": { @@ -2813,7 +2835,7 @@ "OutpostId": { "target": "com.amazonaws.outposts#OutpostId", "traits": { - "smithy.api#documentation": "\n The ID of the Outpost.\n
" + "smithy.api#documentation": "The ID of the Outpost.
" } }, "OwnerId": { @@ -2852,7 +2874,7 @@ "SupportedHardwareType": { "target": "com.amazonaws.outposts#SupportedHardwareType", "traits": { - "smithy.api#documentation": "\n The hardware type. \n
" + "smithy.api#documentation": "The hardware type.
" } } }, @@ -3018,7 +3040,7 @@ "name": "outposts" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "Amazon Web Services Outposts is a fully managed service that extends Amazon Web Services infrastructure, APIs, and tools\n to customer premises. By providing local access to Amazon Web Services managed infrastructure, Amazon Web Services Outposts\n enables customers to build and run applications on premises using the same programming\n interfaces as in Amazon Web Services Regions, while using local compute and storage resources for lower\n latency and local data processing needs.
", + "smithy.api#documentation": "Amazon Web Services Outposts is a fully managed service that extends Amazon Web Services infrastructure, APIs, and tools to\n customer premises. By providing local access to Amazon Web Services managed infrastructure, Amazon Web Services Outposts enables\n customers to build and run applications on premises using the same programming interfaces as\n in Amazon Web Services Regions, while using local compute and storage resources for lower latency and local\n data processing needs.
", "smithy.api#title": "AWS Outposts", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -4236,7 +4258,7 @@ "OpticalStandard": { "target": "com.amazonaws.outposts#OpticalStandard", "traits": { - "smithy.api#documentation": "The type of optical standard used to attach the Outpost to the network. This field is\n dependent on uplink speed, fiber type, and distance to the upstream device. For more information\n about networking requirements for racks, see Network \n in the Amazon Web Services Outposts User Guide.\n
" + "smithy.api#documentation": "The type of optical standard used to attach the Outpost to the network. This field is\n dependent on uplink speed, fiber type, and distance to the upstream device.\n For more information\n about networking requirements for racks, see Network \n in the Amazon Web Services Outposts User Guide.\n
" } }, "MaximumSupportedWeightLbs": { @@ -4325,18 +4347,18 @@ "ShipmentTrackingNumber": { "target": "com.amazonaws.outposts#TrackingId", "traits": { - "smithy.api#documentation": "\n The tracking number of the shipment.\n
" + "smithy.api#documentation": "The tracking number of the shipment.
" } }, "ShipmentCarrier": { "target": "com.amazonaws.outposts#ShipmentCarrier", "traits": { - "smithy.api#documentation": "\n The carrier of the shipment. \n
" + "smithy.api#documentation": "The carrier of the shipment.
" } } }, "traits": { - "smithy.api#documentation": "\n Information about a line item shipment.\n
" + "smithy.api#documentation": "Information about a line item shipment.
" } }, "com.amazonaws.outposts#Site": { @@ -4366,13 +4388,13 @@ "Notes": { "target": "com.amazonaws.outposts#SiteNotes", "traits": { - "smithy.api#documentation": "\n Notes about a site. \n
" + "smithy.api#documentation": "Notes about a site.
" } }, "OperatingAddressCountryCode": { "target": "com.amazonaws.outposts#CountryCode", "traits": { - "smithy.api#documentation": "\n The ISO-3166 two-letter country code where the hardware is installed and powered on.\n
" + "smithy.api#documentation": "The ISO-3166 two-letter country code where the hardware is installed and powered on.\n
" } }, "OperatingAddressStateOrRegion": { @@ -4384,13 +4406,13 @@ "OperatingAddressCity": { "target": "com.amazonaws.outposts#City", "traits": { - "smithy.api#documentation": "\n City where the hardware is installed and powered on.\n
" + "smithy.api#documentation": "City where the hardware is installed and powered on.
" } }, "RackPhysicalProperties": { "target": "com.amazonaws.outposts#RackPhysicalProperties", "traits": { - "smithy.api#documentation": "\n Information about the physical and logistical details for a rack at the site.\n
" + "smithy.api#documentation": "Information about the physical and logistical details for a rack at the site.
" } } }, @@ -4485,7 +4507,7 @@ } ], "traits": { - "smithy.api#documentation": "\n Amazon Web Services uses this action to install Outpost servers.
\n\n Starts the connection required for Outpost server installation. \n
\n\n Use CloudTrail to monitor this action or Amazon Web Services managed policy for Amazon Web Services Outposts to secure it. For \n more information, see \n Amazon Web Services managed policies for Amazon Web Services Outposts and \n Logging Amazon Web Services Outposts API calls with Amazon Web Services CloudTrail in the Amazon Web Services Outposts User Guide.\n
", + "smithy.api#documentation": "\n Amazon Web Services uses this action to install Outpost servers.
\nStarts the connection required for Outpost server installation.
\n\n Use CloudTrail to monitor this action or Amazon Web Services managed policy for Amazon Web Services Outposts to secure it. For \n more information, see \n Amazon Web Services managed policies for Amazon Web Services Outposts and \n Logging Amazon Web Services Outposts API calls with Amazon Web Services CloudTrail in the Amazon Web Services Outposts User Guide.\n
", "smithy.api#http": { "method": "POST", "uri": "/connections", @@ -4499,21 +4521,21 @@ "DeviceSerialNumber": { "target": "com.amazonaws.outposts#DeviceSerialNumber", "traits": { - "smithy.api#documentation": "\n The serial number of the dongle. \n
", + "smithy.api#documentation": "The serial number of the dongle.
", "smithy.api#required": {} } }, "AssetId": { "target": "com.amazonaws.outposts#AssetId", "traits": { - "smithy.api#documentation": "\n The ID of the Outpost server. \n
", + "smithy.api#documentation": "The ID of the Outpost server.
", "smithy.api#required": {} } }, "ClientPublicKey": { "target": "com.amazonaws.outposts#WireGuardPublicKey", "traits": { - "smithy.api#documentation": "\n The public key of the client.\n
", + "smithy.api#documentation": "The public key of the client.
", "smithy.api#required": {} } }, @@ -4521,7 +4543,7 @@ "target": "com.amazonaws.outposts#NetworkInterfaceDeviceIndex", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "\n The device index of the network interface on the Outpost server.\n
", + "smithy.api#documentation": "The device index of the network interface on the Outpost server.
", "smithy.api#required": {} } } @@ -4536,13 +4558,13 @@ "ConnectionId": { "target": "com.amazonaws.outposts#ConnectionId", "traits": { - "smithy.api#documentation": "\n The ID of the connection.\n
" + "smithy.api#documentation": "The ID of the connection.
" } }, "UnderlayIpAddress": { "target": "com.amazonaws.outposts#UnderlayIpAddress", "traits": { - "smithy.api#documentation": "\n The underlay IP address. \n
" + "smithy.api#documentation": "The underlay IP address.
" } } }, @@ -4861,7 +4883,7 @@ } ], "traits": { - "smithy.api#documentation": "\n Updates an Outpost.\n
", + "smithy.api#documentation": "Updates an Outpost.
", "smithy.api#http": { "method": "PATCH", "uri": "/outposts/{OutpostId}", @@ -4875,7 +4897,7 @@ "OutpostId": { "target": "com.amazonaws.outposts#OutpostId", "traits": { - "smithy.api#documentation": "\n The ID or the Amazon Resource Name (ARN) of the Outpost.\n
", + "smithy.api#documentation": "The ID or the Amazon Resource Name (ARN) of the Outpost.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4889,7 +4911,7 @@ "SupportedHardwareType": { "target": "com.amazonaws.outposts#SupportedHardwareType", "traits": { - "smithy.api#documentation": "\n The type of hardware for this Outpost.\n
" + "smithy.api#documentation": "The type of hardware for this Outpost.
" } } }, @@ -4968,7 +4990,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates the address of the specified site.
\nYou can't update a site address if there is an order in progress. You must wait for the order \n to complete or cancel the order.
\nYou can update the operating address before you place an order at the \n site, or after all Outposts that belong to the site have been deactivated.
", + "smithy.api#documentation": "Updates the address of the specified site.
\nYou can't update a site address if there is an order in progress. You must wait for the\n order to complete or cancel the order.
\nYou can update the operating address before you place an order at the site, or after all\n Outposts that belong to the site have been deactivated.
", "smithy.api#http": { "method": "PUT", "uri": "/sites/{SiteId}/address", @@ -4982,7 +5004,7 @@ "SiteId": { "target": "com.amazonaws.outposts#SiteId", "traits": { - "smithy.api#documentation": "\n The ID or the Amazon Resource Name (ARN) of the site.\n
", + "smithy.api#documentation": "The ID or the Amazon Resource Name (ARN) of the site.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4990,14 +5012,14 @@ "AddressType": { "target": "com.amazonaws.outposts#AddressType", "traits": { - "smithy.api#documentation": "\n The type of the address.\n
", + "smithy.api#documentation": "The type of the address.
", "smithy.api#required": {} } }, "Address": { "target": "com.amazonaws.outposts#Address", "traits": { - "smithy.api#documentation": "\n The address for the site.\n
", + "smithy.api#documentation": "The address for the site.
", "smithy.api#required": {} } } @@ -5012,13 +5034,13 @@ "AddressType": { "target": "com.amazonaws.outposts#AddressType", "traits": { - "smithy.api#documentation": "\n The type of the address.\n
" + "smithy.api#documentation": "The type of the address.
" } }, "Address": { "target": "com.amazonaws.outposts#Address", "traits": { - "smithy.api#documentation": "\n Information about an address. \n
" + "smithy.api#documentation": "Information about an address.
" } } }, @@ -5032,7 +5054,7 @@ "SiteId": { "target": "com.amazonaws.outposts#SiteId", "traits": { - "smithy.api#documentation": "\n The ID or the Amazon Resource Name (ARN) of the site.\n
", + "smithy.api#documentation": "The ID or the Amazon Resource Name (ARN) of the site.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -5105,7 +5127,7 @@ "SiteId": { "target": "com.amazonaws.outposts#SiteId", "traits": { - "smithy.api#documentation": "\n The ID or the Amazon Resource Name (ARN) of the site.\n
", + "smithy.api#documentation": "The ID or the Amazon Resource Name (ARN) of the site.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -5113,7 +5135,7 @@ "PowerDrawKva": { "target": "com.amazonaws.outposts#PowerDrawKva", "traits": { - "smithy.api#documentation": "The power draw, in kVA, available at the hardware placement position for the\n rack.
" + "smithy.api#documentation": "The power draw, in kVA, available at the hardware placement position for the rack.
" } }, "PowerPhase": { @@ -5125,7 +5147,7 @@ "PowerConnector": { "target": "com.amazonaws.outposts#PowerConnector", "traits": { - "smithy.api#documentation": "The power connector that Amazon Web Services should plan to provide for connections to the\n hardware. Note the correlation between PowerPhase
and\n PowerConnector
.
Single-phase AC feed
\n\n L6-30P – (common in US); 30A; single phase
\n\n IEC309 (blue) – P+N+E, 6hr; 32 A; single\n phase
\nThree-phase AC feed
\n\n AH530P7W (red) – 3P+N+E, 7hr; 30A; three\n phase
\n\n AH532P6W (red) – 3P+N+E, 6hr; 32A; three\n phase
\nThe power connector that Amazon Web Services should plan to provide for connections to the hardware.\n Note the correlation between PowerPhase
and PowerConnector
.
Single-phase AC feed
\n\n L6-30P – (common in US); 30A; single phase
\n\n IEC309 (blue) – P+N+E, 6hr; 32 A; single\n phase
\nThree-phase AC feed
\n\n AH530P7W (red) – 3P+N+E, 7hr; 30A; three\n phase
\n\n AH532P6W (red) – 3P+N+E, 6hr; 32A; three\n phase
\nThe uplink speed the rack should support for the connection to the Region.\n
" + "smithy.api#documentation": "The uplink speed the rack should support for the connection to the Region.
" } }, "UplinkCount": { @@ -5149,19 +5171,19 @@ "FiberOpticCableType": { "target": "com.amazonaws.outposts#FiberOpticCableType", "traits": { - "smithy.api#documentation": "The type of fiber that you will use to attach the Outpost to your network.\n
" + "smithy.api#documentation": "The type of fiber that you will use to attach the Outpost to your network.
" } }, "OpticalStandard": { "target": "com.amazonaws.outposts#OpticalStandard", "traits": { - "smithy.api#documentation": "The type of optical standard that you will use to attach the Outpost to your\n network. This field is dependent on uplink speed, fiber type, and distance to the upstream\n device. For more information\n about networking requirements for racks, see Network \n in the Amazon Web Services Outposts User Guide.\n
\n\n OPTIC_10GBASE_SR
: 10GBASE-SR
\n OPTIC_10GBASE_IR
: 10GBASE-IR
\n OPTIC_10GBASE_LR
: 10GBASE-LR
\n OPTIC_40GBASE_SR
: 40GBASE-SR
\n OPTIC_40GBASE_ESR
: 40GBASE-ESR
\n OPTIC_40GBASE_IR4_LR4L
: 40GBASE-IR (LR4L)
\n OPTIC_40GBASE_LR4
: 40GBASE-LR4
\n OPTIC_100GBASE_SR4
: 100GBASE-SR4
\n OPTIC_100GBASE_CWDM4
: 100GBASE-CWDM4
\n OPTIC_100GBASE_LR4
: 100GBASE-LR4
\n OPTIC_100G_PSM4_MSA
: 100G PSM4 MSA
\n OPTIC_1000BASE_LX
: 1000Base-LX
\n OPTIC_1000BASE_SX
: 1000Base-SX
The type of optical standard that you will use to attach the Outpost to your network. This\n field is dependent on uplink speed, fiber type, and distance to the upstream device.\n For more information\n about networking requirements for racks, see Network \n in the Amazon Web Services Outposts User Guide.\n
\n\n OPTIC_10GBASE_SR
: 10GBASE-SR
\n OPTIC_10GBASE_IR
: 10GBASE-IR
\n OPTIC_10GBASE_LR
: 10GBASE-LR
\n OPTIC_40GBASE_SR
: 40GBASE-SR
\n OPTIC_40GBASE_ESR
: 40GBASE-ESR
\n OPTIC_40GBASE_IR4_LR4L
: 40GBASE-IR (LR4L)
\n OPTIC_40GBASE_LR4
: 40GBASE-LR4
\n OPTIC_100GBASE_SR4
: 100GBASE-SR4
\n OPTIC_100GBASE_CWDM4
: 100GBASE-CWDM4
\n OPTIC_100GBASE_LR4
: 100GBASE-LR4
\n OPTIC_100G_PSM4_MSA
: 100G PSM4 MSA
\n OPTIC_1000BASE_LX
: 1000Base-LX
\n OPTIC_1000BASE_SX
: 1000Base-SX
The maximum rack weight that this site can support. NO_LIMIT
is over\n 2000lbs.
The maximum rack weight that this site can support. NO_LIMIT
is over 2000lbs.\n
Removes one or more attributes, of the same attribute type, from all the endpoints that are associated with an application.
", + "smithy.api#documentation": "Removes one or more custom attributes, of the same attribute type, from the application. Existing endpoints still have the attributes but Amazon Pinpoint will stop capturing new or changed values for these attributes.
", "smithy.api#http": { "method": "PUT", "uri": "/v1/apps/{ApplicationId}/attributes/{AttributeType}", diff --git a/codegen/sdk/aws-models/quicksight.json b/codegen/sdk/aws-models/quicksight.json index 0a51d1c1556..73ef12cf8a1 100644 --- a/codegen/sdk/aws-models/quicksight.json +++ b/codegen/sdk/aws-models/quicksight.json @@ -30287,6 +30287,12 @@ "traits": { "smithy.api#enumValue": "DATASET" } + }, + "TOPIC": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TOPIC" + } } } }, @@ -36380,6 +36386,12 @@ "traits": { "smithy.api#documentation": "The identity ID for a user in the external login provider.
" } + }, + "Tags": { + "target": "com.amazonaws.quicksight#TagList", + "traits": { + "smithy.api#documentation": "The tags to associate with the user.
" + } } }, "traits": { @@ -42341,7 +42353,7 @@ } ], "traits": { - "smithy.api#documentation": "Assigns one or more tags (key-value pairs) to the specified Amazon QuickSight resource.
\nTags can help you organize and categorize your resources. You can also use them to\n\t\t\tscope user permissions, by granting a user permission to access or change only resources\n\t\t\twith certain tag values. You can use the TagResource
operation with a\n\t\t\tresource that already has tags. If you specify a new tag key for the resource, this tag\n\t\t\tis appended to the list of tags associated with the resource. If you specify a tag key\n\t\t\tthat is already associated with the resource, the new tag value that you specify\n\t\t\treplaces the previous value for that tag.
You can associate as many as 50 tags with a resource. Amazon QuickSight supports tagging on data\n\t\t\tset, data source, dashboard, template, and topic.
\nTagging for Amazon QuickSight works in a similar way to tagging for other Amazon Web Services services, except for\n\t\t\tthe following:
\nYou can't use tags to track costs for Amazon QuickSight. This isn't\n\t\t\t\tpossible because you can't tag the resources that Amazon QuickSight costs are based \n\t\t\t\ton, for example Amazon QuickSight storage capacity (SPICE), number of users, type \n\t\t\t\tof users, and usage metrics.
\nAmazon QuickSight doesn't currently support the tag editor for Resource Groups.
\nAssigns one or more tags (key-value pairs) to the specified Amazon QuickSight resource.
\nTags can help you organize and categorize your resources. You can also use them to\n\t\t\tscope user permissions, by granting a user permission to access or change only resources\n\t\t\twith certain tag values. You can use the TagResource
operation with a\n\t\t\tresource that already has tags. If you specify a new tag key for the resource, this tag\n\t\t\tis appended to the list of tags associated with the resource. If you specify a tag key\n\t\t\tthat is already associated with the resource, the new tag value that you specify\n\t\t\treplaces the previous value for that tag.
You can associate as many as 50 tags with a resource. Amazon QuickSight supports tagging on data\n\t\t\tset, data source, dashboard, template, topic, and user.
\nTagging for Amazon QuickSight works in a similar way to tagging for other Amazon Web Services services, except for\n\t\t\tthe following:
\nTags are used to track costs for users in Amazon QuickSight. You can't tag other resources that Amazon QuickSight costs are based on, such as storage capacoty (SPICE), session usage, alert consumption, or reporting units.
\nAmazon QuickSight doesn't currently support the tag editor for Resource Groups.
\nSpecifies the days since the initiation of an incomplete multipart upload that Amazon S3 will\n wait before permanently removing all parts of the upload. For more information, see \n Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration in the\n Amazon S3 User Guide.
" + "smithy.api#documentation": "Specifies the days since the initiation of an incomplete multipart upload that Amazon S3 will\n wait before permanently removing all parts of the upload. For more information, see \n Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration in\n the Amazon S3 User Guide.
" } }, "com.amazonaws.s3#AbortMultipartUpload": { @@ -16357,7 +16357,7 @@ "target": "com.amazonaws.s3#CompleteMultipartUploadOutput" }, "traits": { - "smithy.api#documentation": "Completes a multipart upload by assembling previously uploaded parts.
\nYou first initiate the multipart upload and then upload all parts using the UploadPart\n operation. After successfully uploading all relevant parts of an upload, you call this\n action to complete the upload. Upon receiving this request, Amazon S3 concatenates all the\n parts in ascending order by part number to create a new object. In the Complete Multipart\n Upload request, you must provide the parts list. You must ensure that the parts list is\n complete. This action concatenates the parts that you provide in the list. For each part in\n the list, you must provide the part number and the ETag
value, returned after\n that part was uploaded.
Processing of a Complete Multipart Upload request could take several minutes to\n complete. After Amazon S3 begins processing the request, it sends an HTTP response header that\n specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white\n space characters to keep the connection from timing out. A request could fail after the\n initial 200 OK response has been sent. This means that a 200 OK
response can\n contain either a success or an error. If you call the S3 API directly, make sure to design\n your application to parse the contents of the response and handle it appropriately. If you\n use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply\n error handling per your configuration settings (including automatically retrying the\n request as appropriate). If the condition persists, the SDKs throws an exception (or, for\n the SDKs that don't use exceptions, they return the error).
Note that if CompleteMultipartUpload
fails, applications should be prepared\n to retry the failed requests. For more information, see Amazon S3 Error Best\n Practices.
You cannot use Content-Type: application/x-www-form-urlencoded
with\n Complete Multipart Upload requests. Also, if you do not provide a\n Content-Type
header, CompleteMultipartUpload
returns a 200\n OK response.
For more information about multipart uploads, see Uploading Objects Using Multipart\n Upload.
\nFor information about permissions required to use the multipart upload API, see Multipart Upload\n and Permissions.
\n\n CompleteMultipartUpload
has the following special errors:
Error code: EntityTooSmall
\n
Description: Your proposed upload is smaller than the minimum allowed object\n size. Each part must be at least 5 MB in size, except the last part.
\n400 Bad Request
\nError code: InvalidPart
\n
Description: One or more of the specified parts could not be found. The part\n might not have been uploaded, or the specified entity tag might not have\n matched the part's entity tag.
\n400 Bad Request
\nError code: InvalidPartOrder
\n
Description: The list of parts was not in ascending order. The parts list\n must be specified in order by part number.
\n400 Bad Request
\nError code: NoSuchUpload
\n
Description: The specified multipart upload does not exist. The upload ID\n might be invalid, or the multipart upload might have been aborted or\n completed.
\n404 Not Found
\nThe following operations are related to CompleteMultipartUpload
:
\n UploadPart\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nCompletes a multipart upload by assembling previously uploaded parts.
\nYou first initiate the multipart upload and then upload all parts using the UploadPart\n operation. After successfully uploading all relevant parts of an upload, you call this\n action to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts\n in ascending order by part number to create a new object. In the Complete Multipart Upload\n request, you must provide the parts list. You must ensure that the parts list is complete.\n This action concatenates the parts that you provide in the list. For each part in the list,\n you must provide the part number and the ETag
value, returned after that part\n was uploaded.
Processing of a Complete Multipart Upload request could take several minutes to\n complete. After Amazon S3 begins processing the request, it sends an HTTP response header that\n specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white\n space characters to keep the connection from timing out. A request could fail after the\n initial 200 OK response has been sent. This means that a 200 OK
response can\n contain either a success or an error. If you call the S3 API directly, make sure to design\n your application to parse the contents of the response and handle it appropriately. If you\n use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply\n error handling per your configuration settings (including automatically retrying the\n request as appropriate). If the condition persists, the SDKs throws an exception (or, for\n the SDKs that don't use exceptions, they return the error).
Note that if CompleteMultipartUpload
fails, applications should be prepared\n to retry the failed requests. For more information, see Amazon S3 Error Best\n Practices.
You cannot use Content-Type: application/x-www-form-urlencoded
with\n Complete Multipart Upload requests. Also, if you do not provide a\n Content-Type
header, CompleteMultipartUpload
returns a 200\n OK response.
For more information about multipart uploads, see Uploading Objects Using Multipart\n Upload.
\nFor information about permissions required to use the multipart upload API, see Multipart Upload\n and Permissions.
\n\n CompleteMultipartUpload
has the following special errors:
Error code: EntityTooSmall
\n
Description: Your proposed upload is smaller than the minimum allowed object\n size. Each part must be at least 5 MB in size, except the last part.
\n400 Bad Request
\nError code: InvalidPart
\n
Description: One or more of the specified parts could not be found. The part\n might not have been uploaded, or the specified entity tag might not have\n matched the part's entity tag.
\n400 Bad Request
\nError code: InvalidPartOrder
\n
Description: The list of parts was not in ascending order. The parts list\n must be specified in order by part number.
\n400 Bad Request
\nError code: NoSuchUpload
\n
Description: The specified multipart upload does not exist. The upload ID\n might be invalid, or the multipart upload might have been aborted or\n completed.
\n404 Not Found
\nThe following operations are related to CompleteMultipartUpload
:
\n UploadPart\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nCreates a copy of an object that is already stored in Amazon S3.
\nYou can store individual objects of up to 5 TB in Amazon S3. You create a copy of your\n object up to 5 GB in size in a single atomic action using this API. However, to copy an\n object greater than 5 GB, you must use the multipart upload Upload Part - Copy\n (UploadPartCopy) API. For more information, see Copy Object Using the\n REST Multipart Upload API.
\nAll copy requests must be authenticated. Additionally, you must have\n read access to the source object and write\n access to the destination bucket. For more information, see REST Authentication. Both the\n Region that you want to copy the object from and the Region that you want to copy the\n object to must be enabled for your account.
\nA copy request might return an error when Amazon S3 receives the copy request or while Amazon S3\n is copying the files. If the error occurs before the copy action starts, you receive a\n standard Amazon S3 error. If the error occurs during the copy operation, the error response is\n embedded in the 200 OK
response. This means that a 200 OK
\n response can contain either a success or an error. If you call the S3 API directly, make\n sure to design your application to parse the contents of the response and handle it\n appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the\n embedded error and apply error handling per your configuration settings (including\n automatically retrying the request as appropriate). If the condition persists, the SDKs\n throws an exception (or, for the SDKs that don't use exceptions, they return the\n error).
If the copy is successful, you receive a response with information about the copied\n object.
\nIf the request is an HTTP 1.1 request, the response is chunk encoded. If it were not,\n it would not contain the content-length, and you would need to read the entire\n body.
\nThe copy request charge is based on the storage class and Region that you specify for\n the destination object. The request can also result in a data retrieval charge for the\n source if the source storage class bills for data retrieval. For pricing information, see\n Amazon S3 pricing.
\nAmazon S3 transfer acceleration does not support cross-Region copies. If you request a\n cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad\n Request
error. For more information, see Transfer\n Acceleration.
When copying an object, you can preserve all metadata (the default) or specify new metadata.\n However, the access control list (ACL) is not preserved and is set to private for the user making the request. To\n override the default ACL setting, specify a new ACL when generating a copy request. For\n more information, see Using ACLs.
\nTo specify whether you want the object metadata copied from the source object or\n replaced with metadata provided in the request, you can optionally add the\n x-amz-metadata-directive
header. When you grant permissions, you can use\n the s3:x-amz-metadata-directive
condition key to enforce certain metadata\n behavior when objects are uploaded. For more information, see Specifying Conditions in a\n Policy in the Amazon S3 User Guide. For a complete list of\n Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for\n Amazon S3.
\n x-amz-website-redirect-location
is unique to each object and must be\n specified in the request headers to copy the value.
To only copy an object under certain conditions, such as whether the Etag
\n matches or whether the object was modified before or after a specified date, use the\n following request parameters:
\n x-amz-copy-source-if-match
\n
\n x-amz-copy-source-if-none-match
\n
\n x-amz-copy-source-if-unmodified-since
\n
\n x-amz-copy-source-if-modified-since
\n
If both the x-amz-copy-source-if-match
and\n x-amz-copy-source-if-unmodified-since
headers are present in the request\n and evaluate as follows, Amazon S3 returns 200 OK
and copies the data:
\n x-amz-copy-source-if-match
condition evaluates to true
\n x-amz-copy-source-if-unmodified-since
condition evaluates to\n false
If both the x-amz-copy-source-if-none-match
and\n x-amz-copy-source-if-modified-since
headers are present in the request and\n evaluate as follows, Amazon S3 returns the 412 Precondition Failed
response\n code:
\n x-amz-copy-source-if-none-match
condition evaluates to false
\n x-amz-copy-source-if-modified-since
condition evaluates to\n true
All headers with the x-amz-
prefix, including\n x-amz-copy-source
, must be signed.
Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When\n copying an object, if you don't specify encryption information in your copy\n request, the encryption setting of the target object is set to the default\n encryption configuration of the destination bucket. By default, all buckets have a\n base level of encryption configuration that uses server-side encryption with Amazon S3\n managed keys (SSE-S3). If the destination bucket has a default encryption\n configuration that uses server-side encryption with Key Management Service (KMS) keys\n (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or\n server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses\n the corresponding KMS key, or a customer-provided key to encrypt the target\n object copy.
\nWhen you perform a CopyObject
operation, if you want to use a different type\n of encryption setting for the target object, you can use other appropriate\n encryption-related headers to encrypt the target object with a KMS key, an Amazon S3 managed\n key, or a customer-provided key. With server-side encryption, Amazon S3 encrypts your data as it\n writes your data to disks in its data centers and decrypts the data when you access it. If the\n encryption setting in your request is different from the default encryption configuration\n of the destination bucket, the encryption setting in your request takes precedence. If the\n source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary\n encryption information in your request so that Amazon S3 can decrypt the object for copying. For\n more information about server-side encryption, see Using Server-Side\n Encryption.
If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the\n object. For more information, see Amazon S3 Bucket Keys in the\n Amazon S3 User Guide.
\nWhen copying an object, you can optionally use headers to grant ACL-based permissions.\n By default, all objects are private. Only the owner has full access control. When adding a\n new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups\n that are defined by Amazon S3. These permissions are then added to the ACL on the object. For more\n information, see Access Control List (ACL) Overview and Managing ACLs Using the REST\n API.
\nIf the bucket that you're copying objects to uses the bucket owner enforced setting for\n S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use\n this setting only accept PUT
requests that don't specify an ACL or PUT
requests that\n specify bucket owner full control ACLs, such as the bucket-owner-full-control
\n canned ACL or an equivalent form of this ACL expressed in the XML format.
For more information, see Controlling ownership of\n objects and disabling ACLs in the Amazon S3 User Guide.
\nIf your bucket uses the bucket owner enforced setting for Object Ownership, all\n objects written to the bucket by any account will be owned by the bucket owner.
\nWhen copying an object, if it has a checksum, that checksum will be copied to the new\n object by default. When you copy the object over, you can optionally specify a different\n checksum algorithm to use with the x-amz-checksum-algorithm
header.
You can use the CopyObject
action to change the storage class of an object\n that is already stored in Amazon S3 by using the StorageClass
parameter. For more\n information, see Storage Classes in the\n Amazon S3 User Guide.
If the source object's storage class is GLACIER, you must restore a copy of\n this object before you can use it as a source object for the copy operation. For\n more information, see RestoreObject. For\n more information, see Copying\n Objects.
\nBy default, x-amz-copy-source
header identifies the current version of an object\n to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was\n deleted. To copy a different version, use the versionId
subresource.
If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for\n the object being copied. This version ID is different from the version ID of the source\n object. Amazon S3 returns the version ID of the copied object in the\n x-amz-version-id
response header in the response.
If you do not enable versioning or suspend it on the target bucket, the version ID that\n Amazon S3 generates is always null.
\nThe following operations are related to CopyObject
:
Creates a copy of an object that is already stored in Amazon S3.
\nYou can store individual objects of up to 5 TB in Amazon S3. You create a copy of your\n object up to 5 GB in size in a single atomic action using this API. However, to copy an\n object greater than 5 GB, you must use the multipart upload Upload Part - Copy\n (UploadPartCopy) API. For more information, see Copy Object Using the\n REST Multipart Upload API.
\nAll copy requests must be authenticated. Additionally, you must have\n read access to the source object and write\n access to the destination bucket. For more information, see REST Authentication. Both the\n Region that you want to copy the object from and the Region that you want to copy the\n object to must be enabled for your account.
\nA copy request might return an error when Amazon S3 receives the copy request or while Amazon S3\n is copying the files. If the error occurs before the copy action starts, you receive a\n standard Amazon S3 error. If the error occurs during the copy operation, the error response is\n embedded in the 200 OK
response. This means that a 200 OK
\n response can contain either a success or an error. If you call the S3 API directly, make\n sure to design your application to parse the contents of the response and handle it\n appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the\n embedded error and apply error handling per your configuration settings (including\n automatically retrying the request as appropriate). If the condition persists, the SDKs\n throws an exception (or, for the SDKs that don't use exceptions, they return the\n error).
If the copy is successful, you receive a response with information about the copied\n object.
\nIf the request is an HTTP 1.1 request, the response is chunk encoded. If it were not,\n it would not contain the content-length, and you would need to read the entire\n body.
\nThe copy request charge is based on the storage class and Region that you specify for\n the destination object. The request can also result in a data retrieval charge for the\n source if the source storage class bills for data retrieval. For pricing information, see\n Amazon S3 pricing.
\nAmazon S3 transfer acceleration does not support cross-Region copies. If you request a\n cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad\n Request
error. For more information, see Transfer\n Acceleration.
When copying an object, you can preserve all metadata (the default) or specify\n new metadata. However, the access control list (ACL) is not preserved and is set\n to private for the user making the request. To override the default ACL setting,\n specify a new ACL when generating a copy request. For more information, see Using\n ACLs.
\nTo specify whether you want the object metadata copied from the source object\n or replaced with metadata provided in the request, you can optionally add the\n x-amz-metadata-directive
header. When you grant permissions, you\n can use the s3:x-amz-metadata-directive
condition key to enforce\n certain metadata behavior when objects are uploaded. For more information, see\n Specifying Conditions in a\n Policy in the Amazon S3 User Guide. For a complete list\n of Amazon S3-specific condition keys, see Actions, Resources, and Condition\n Keys for Amazon S3.
\n x-amz-website-redirect-location
is unique to each object and\n must be specified in the request headers to copy the value.
To only copy an object under certain conditions, such as whether the\n Etag
matches or whether the object was modified before or after a\n specified date, use the following request parameters:
\n x-amz-copy-source-if-match
\n
\n x-amz-copy-source-if-none-match
\n
\n x-amz-copy-source-if-unmodified-since
\n
\n x-amz-copy-source-if-modified-since
\n
If both the x-amz-copy-source-if-match
and\n x-amz-copy-source-if-unmodified-since
headers are present in the\n request and evaluate as follows, Amazon S3 returns 200 OK
and copies the\n data:
\n x-amz-copy-source-if-match
condition evaluates to\n true
\n x-amz-copy-source-if-unmodified-since
condition evaluates to\n false
If both the x-amz-copy-source-if-none-match
and\n x-amz-copy-source-if-modified-since
headers are present in the\n request and evaluate as follows, Amazon S3 returns the 412 Precondition\n Failed
response code:
\n x-amz-copy-source-if-none-match
condition evaluates to\n false
\n x-amz-copy-source-if-modified-since
condition evaluates to\n true
All headers with the x-amz-
prefix, including\n x-amz-copy-source
, must be signed.
Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket.\n When copying an object, if you don't specify encryption information in your copy\n request, the encryption setting of the target object is set to the default\n encryption configuration of the destination bucket. By default, all buckets have a\n base level of encryption configuration that uses server-side encryption with Amazon S3\n managed keys (SSE-S3). If the destination bucket has a default encryption\n configuration that uses server-side encryption with Key Management Service (KMS) keys\n (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or\n server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses\n the corresponding KMS key, or a customer-provided key to encrypt the target\n object copy.
\nWhen you perform a CopyObject
operation, if you want to use a\n different type of encryption setting for the target object, you can use other\n appropriate encryption-related headers to encrypt the target object with a\n KMS key, an Amazon S3 managed key, or a customer-provided key. With server-side\n encryption, Amazon S3 encrypts your data as it writes your data to disks in its data\n centers and decrypts the data when you access it. If the encryption setting in\n your request is different from the default encryption configuration of the\n destination bucket, the encryption setting in your request takes precedence. If\n the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the\n necessary encryption information in your request so that Amazon S3 can decrypt the\n object for copying. For more information about server-side encryption, see Using\n Server-Side Encryption.
If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the\n object. For more information, see Amazon S3 Bucket Keys in the\n Amazon S3 User Guide.
\nWhen copying an object, you can optionally use headers to grant ACL-based\n permissions. By default, all objects are private. Only the owner has full access\n control. When adding a new object, you can grant permissions to individual\n Amazon Web Services accounts or to predefined groups that are defined by Amazon S3. These permissions\n are then added to the ACL on the object. For more information, see Access Control\n List (ACL) Overview and Managing ACLs Using the REST\n API.
\nIf the bucket that you're copying objects to uses the bucket owner enforced\n setting for S3 Object Ownership, ACLs are disabled and no longer affect\n permissions. Buckets that use this setting only accept PUT
requests\n that don't specify an ACL or PUT
requests that specify bucket owner\n full control ACLs, such as the bucket-owner-full-control
canned ACL\n or an equivalent form of this ACL expressed in the XML format.
For more information, see Controlling\n ownership of objects and disabling ACLs in the\n Amazon S3 User Guide.
\nIf your bucket uses the bucket owner enforced setting for Object Ownership,\n all objects written to the bucket by any account will be owned by the bucket\n owner.
\nWhen copying an object, if it has a checksum, that checksum will be copied to\n the new object by default. When you copy the object over, you can optionally\n specify a different checksum algorithm to use with the\n x-amz-checksum-algorithm
header.
You can use the CopyObject
action to change the storage class of\n an object that is already stored in Amazon S3 by using the StorageClass
\n parameter. For more information, see Storage Classes in\n the Amazon S3 User Guide.
If the source object's storage class is GLACIER or\n DEEP_ARCHIVE, or the object's storage class is\n INTELLIGENT_TIERING and it's S3 Intelligent-Tiering access tier is\n Archive Access or Deep Archive Access, you must restore a copy of this object\n before you can use it as a source object for the copy operation. For more\n information, see RestoreObject. For\n more information, see Copying\n Objects.
\nBy default, x-amz-copy-source
header identifies the current\n version of an object to copy. If the current version is a delete marker, Amazon S3\n behaves as if the object was deleted. To copy a different version, use the\n versionId
subresource.
If you enable versioning on the target bucket, Amazon S3 generates a unique version\n ID for the object being copied. This version ID is different from the version ID\n of the source object. Amazon S3 returns the version ID of the copied object in the\n x-amz-version-id
response header in the response.
If you do not enable versioning or suspend it on the target bucket, the version\n ID that Amazon S3 generates is always null.
\nThe following operations are related to CopyObject
:
By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The\n STANDARD storage class provides high durability and high availability. Depending on\n performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses\n the OUTPOSTS Storage Class. For more information, see Storage Classes in the\n Amazon S3 User Guide.
", + "smithy.api#documentation": "If the x-amz-storage-class
header is not used, the copied object will be stored in the\n STANDARD Storage Class by default. The STANDARD storage class provides high durability and\n high availability. Depending on performance needs, you can specify a different Storage\n Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see\n Storage\n Classes in the Amazon S3 User Guide.
Specifies the KMS key ID to use for object encryption. All GET and PUT requests for an\n object protected by KMS will fail if they're not made via SSL or using SigV4. For\n information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see\n Specifying the\n Signature Version in Request Authentication in the\n Amazon S3 User Guide.
", + "smithy.api#documentation": "Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an\n object protected by KMS will fail if they're not made via SSL or using SigV4. For\n information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see\n Specifying the\n Signature Version in Request Authentication in the\n Amazon S3 User Guide.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -17282,7 +17282,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a\n valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to\n create buckets. By creating the bucket, you become the bucket owner.
\nNot every string is an acceptable bucket name. For information about bucket naming\n restrictions, see Bucket naming\n rules.
\nIf you want to create an Amazon S3 on Outposts bucket, see Create Bucket.
\nBy default, the bucket is created in the US East (N. Virginia) Region. You can\n optionally specify a Region in the request body. You might choose a Region to optimize\n latency, minimize costs, or address regulatory requirements. For example, if you reside in\n Europe, you will probably find it advantageous to create buckets in the Europe (Ireland)\n Region. For more information, see Accessing a\n bucket.
\nIf you send your create bucket request to the s3.amazonaws.com
endpoint,\n the request goes to the us-east-1
Region. Accordingly, the signature calculations in\n Signature Version 4 must use us-east-1
as the Region, even if the location constraint in\n the request specifies another Region where the bucket is to be created. If you create a\n bucket in a Region other than US East (N. Virginia), your application must be able to\n handle 307 redirect. For more information, see Virtual hosting of\n buckets.
In addition to s3:CreateBucket
, the following permissions are required when\n your CreateBucket
request includes specific headers:
\n Access control lists (ACLs) - If your CreateBucket
request\n specifies access control list (ACL) permissions and the ACL is public-read, public-read-write,\n authenticated-read, or if you specify access permissions explicitly through any other\n ACL, both s3:CreateBucket
and s3:PutBucketAcl
permissions\n are needed. If the ACL for the CreateBucket
request is private or if the request doesn't\n specify any ACLs, only s3:CreateBucket
permission is needed.
\n Object Lock - If ObjectLockEnabledForBucket
is set to true in your\n CreateBucket
request,\n s3:PutBucketObjectLockConfiguration
and\n s3:PutBucketVersioning
permissions are required.
\n S3 Object Ownership - If your CreateBucket
request includes the x-amz-object-ownership
header, then the\n s3:PutBucketOwnershipControls
permission is required. By default, ObjectOwnership
is set to BucketOWnerEnforced
and ACLs are disabled. We recommend keeping\n ACLs disabled, except in uncommon use cases where you must control access for each object individually. If you want to change the ObjectOwnership
setting, you can use the \n x-amz-object-ownership
header in your CreateBucket
request to set the ObjectOwnership
setting of your choice.\n For more information about S3 Object Ownership, see Controlling object\n ownership in the Amazon S3 User Guide.
\n S3 Block Public Access - If your specific use case requires granting public access to your S3 resources, you can disable Block Public Access. You can create a new bucket with Block Public Access enabled, then separately call the \n DeletePublicAccessBlock
\n API. To use this operation, you must have the\n s3:PutBucketPublicAccessBlock
permission. By default, all Block\n Public Access settings are enabled for new buckets. To avoid inadvertent exposure of\n your resources, we recommend keeping the S3 Block Public Access settings enabled. For more information about S3 Block Public Access, see Blocking public\n access to your Amazon S3 storage in the Amazon S3 User Guide.
If your CreateBucket
request sets BucketOwnerEnforced
for Amazon S3 Object Ownership\n and specifies a bucket ACL that provides access to an external Amazon Web Services account, your request fails with a 400
error and returns the InvalidBucketAcLWithObjectOwnership
error code. For more information,\n see Setting Object\n Ownership on an existing bucket in the Amazon S3 User Guide.
The following operations are related to CreateBucket
:
\n PutObject\n
\n\n DeleteBucket\n
\nCreates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a\n valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to\n create buckets. By creating the bucket, you become the bucket owner.
\nNot every string is an acceptable bucket name. For information about bucket naming\n restrictions, see Bucket naming\n rules.
\nIf you want to create an Amazon S3 on Outposts bucket, see Create Bucket.
\nBy default, the bucket is created in the US East (N. Virginia) Region. You can\n optionally specify a Region in the request body. To constrain the bucket creation to a\n specific Region, you can use \n LocationConstraint
\n condition key. You might choose a Region to\n optimize latency, minimize costs, or address regulatory requirements. For example, if you\n reside in Europe, you will probably find it advantageous to create buckets in the Europe\n (Ireland) Region. For more information, see Accessing a\n bucket.
If you send your create bucket request to the s3.amazonaws.com
endpoint,\n the request goes to the us-east-1
Region. Accordingly, the signature\n calculations in Signature Version 4 must use us-east-1
as the Region, even\n if the location constraint in the request specifies another Region where the bucket is\n to be created. If you create a bucket in a Region other than US East (N. Virginia), your\n application must be able to handle 307 redirect. For more information, see Virtual hosting of\n buckets.
In addition to s3:CreateBucket
, the following permissions are\n required when your CreateBucket
request includes specific\n headers:
\n Access control lists (ACLs) - If your\n CreateBucket
request specifies access control list (ACL)\n permissions and the ACL is public-read, public-read-write,\n authenticated-read, or if you specify access permissions explicitly through\n any other ACL, both s3:CreateBucket
and\n s3:PutBucketAcl
permissions are needed. If the ACL for the\n CreateBucket
request is private or if the request doesn't\n specify any ACLs, only s3:CreateBucket
permission is needed.\n
\n Object Lock - If\n ObjectLockEnabledForBucket
is set to true in your\n CreateBucket
request,\n s3:PutBucketObjectLockConfiguration
and\n s3:PutBucketVersioning
permissions are required.
\n S3 Object Ownership - If your\n CreateBucket
request includes the\n x-amz-object-ownership
header, then the\n s3:PutBucketOwnershipControls
permission is required. By\n default, ObjectOwnership
is set to\n BucketOWnerEnforced
and ACLs are disabled. We recommend\n keeping ACLs disabled, except in uncommon use cases where you must control\n access for each object individually. If you want to change the\n ObjectOwnership
setting, you can use the\n x-amz-object-ownership
header in your\n CreateBucket
request to set the ObjectOwnership
\n setting of your choice. For more information about S3 Object Ownership, see\n Controlling\n object ownership in the\n Amazon S3 User Guide.
\n S3 Block Public Access - If your\n specific use case requires granting public access to your S3 resources, you\n can disable Block Public Access. You can create a new bucket with Block\n Public Access enabled, then separately call the \n DeletePublicAccessBlock
\n API. To use this operation, you must have the\n s3:PutBucketPublicAccessBlock
permission. By default, all\n Block Public Access settings are enabled for new buckets. To avoid\n inadvertent exposure of your resources, we recommend keeping the S3 Block\n Public Access settings enabled. For more information about S3 Block Public\n Access, see Blocking\n public access to your Amazon S3 storage in the\n Amazon S3 User Guide.
If your CreateBucket
request sets BucketOwnerEnforced
for\n Amazon S3 Object Ownership and specifies a bucket ACL that provides access to an external\n Amazon Web Services account, your request fails with a 400
error and returns the\n InvalidBucketAcLWithObjectOwnership
error code. For more information,\n see Setting Object\n Ownership on an existing bucket in the Amazon S3 User Guide.\n
The following operations are related to CreateBucket
:
\n PutObject\n
\n\n DeleteBucket\n
\nThis action initiates a multipart upload and returns an upload ID. This upload ID is\n used to associate all of the parts in the specific multipart upload. You specify this\n upload ID in each of your subsequent upload part requests (see UploadPart). You also include this\n upload ID in the final request to either complete or abort the multipart upload\n request.
\nFor more information about multipart uploads, see Multipart Upload Overview.
\nIf you have configured a lifecycle rule to abort incomplete multipart uploads, the\n upload must complete within the number of days specified in the bucket lifecycle\n configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort\n action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration.
\nFor information about the permissions required to use the multipart upload API, see\n Multipart\n Upload and Permissions.
\nFor request signing, multipart upload is just a series of regular requests. You initiate\n a multipart upload, send one or more requests to upload parts, and then complete the\n multipart upload process. You sign each request individually. There is nothing special\n about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4).
\nAfter you initiate a multipart upload and upload one or more parts, to stop being\n charged for storing the uploaded parts, you must either complete or abort the multipart\n upload. Amazon S3 frees up the space used to store the parts and stop charging you for\n storing them only after you either complete or abort a multipart upload.
\nServer-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it\n writes it to disks in its data centers and decrypts it when you access it. Amazon S3\n automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a\n multipart upload, if you don't specify encryption information in your request, the\n encryption setting of the uploaded parts is set to the default encryption configuration of\n the destination bucket. By default, all buckets have a base level of encryption\n configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the\n destination bucket has a default encryption configuration that uses server-side encryption\n with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C),\n Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded\n parts. When you perform a CreateMultipartUpload operation, if you want to use a different\n type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the\n object with a KMS key, an Amazon S3 managed key, or a customer-provided key. If the encryption\n setting in your request is different from the default encryption configuration of the\n destination bucket, the encryption setting in your request takes precedence. If you choose\n to provide your own encryption key, the request headers you provide in UploadPart\n and UploadPartCopy requests must match the headers you used in the request to\n initiate the upload by using CreateMultipartUpload
. You can request that Amazon S3\n save the uploaded parts encrypted with server-side encryption with an Amazon S3 managed key\n (SSE-S3), an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key\n (SSE-C).
To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester\n must have permission to the kms:Decrypt
and kms:GenerateDataKey*
\n actions on the key. These permissions are required because Amazon S3 must decrypt and read data\n from the encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API\n and permissions and Protecting data using\n server-side encryption with Amazon Web Services KMS in the\n Amazon S3 User Guide.
If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key,\n then you must have these permissions on the key policy. If your IAM user or role belongs\n to a different account than the key, then you must have the permissions on both the key\n policy and your IAM user or role.
\nFor more information, see Protecting Data Using Server-Side\n Encryption.
\nWhen copying an object, you can optionally specify the accounts or groups that\n should be granted specific permissions on the new object. There are two ways to\n grant the permissions using the request headers:
\nSpecify a canned ACL with the x-amz-acl
request header. For\n more information, see Canned\n ACL.
Specify access permissions explicitly with the\n x-amz-grant-read
, x-amz-grant-read-acp
,\n x-amz-grant-write-acp
, and\n x-amz-grant-full-control
headers. These parameters map to\n the set of permissions that Amazon S3 supports in an ACL. For more information,\n see Access Control List (ACL) Overview.
You can use either a canned ACL or specify access permissions explicitly. You\n cannot do both.
\nAmazon S3 encrypts data\n by using server-side encryption with an Amazon S3 managed key (SSE-S3) by default. Server-side encryption is for data encryption at rest. Amazon S3 encrypts\n your data as it writes it to disks in its data centers and decrypts it when you\n access it. You can request that Amazon S3 encrypts\n data at rest by using server-side encryption with other key options. The option you use depends on\n whether you want to use KMS keys (SSE-KMS) or provide your own encryption keys\n (SSE-C).
\nUse KMS keys (SSE-KMS) that include the Amazon Web Services managed key\n (aws/s3
) and KMS customer managed keys stored in Key Management Service (KMS) – If you\n want Amazon Web Services to manage the keys used to encrypt data, specify the following\n headers in the request.
\n x-amz-server-side-encryption
\n
\n x-amz-server-side-encryption-aws-kms-key-id
\n
\n x-amz-server-side-encryption-context
\n
If you specify x-amz-server-side-encryption:aws:kms
, but\n don't provide x-amz-server-side-encryption-aws-kms-key-id
,\n Amazon S3 uses the Amazon Web Services managed key (aws/s3
key) in KMS to\n protect the data.
All GET
and PUT
requests for an object protected\n by KMS fail if you don't make them by using Secure Sockets Layer (SSL),\n Transport Layer Security (TLS), or Signature Version 4.
For more information about server-side encryption with KMS keys\n (SSE-KMS), see Protecting Data\n Using Server-Side Encryption with KMS keys.
\nUse customer-provided encryption keys (SSE-C) – If you want to manage\n your own encryption keys, provide all the following headers in the\n request.
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about server-side encryption with customer-provided\n encryption keys (SSE-C), see \n Protecting data using server-side encryption with customer-provided\n encryption keys (SSE-C).
\nYou also can use the following access control–related headers with this\n operation. By default, all objects are private. Only the owner has full access\n control. When adding a new object, you can grant permissions to individual\n Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then\n added to the access control list (ACL) on the object. For more information, see\n Using ACLs. With this operation, you can grant access permissions\n using one of the following two methods:
\nSpecify a canned ACL (x-amz-acl
) — Amazon S3 supports a set of\n predefined ACLs, known as canned ACLs. Each canned ACL\n has a predefined set of grantees and permissions. For more information, see\n Canned\n ACL.
Specify access permissions explicitly — To explicitly grant access\n permissions to specific Amazon Web Services accounts or groups, use the following headers.\n Each header maps to specific permissions that Amazon S3 supports in an ACL. For\n more information, see Access Control List (ACL)\n Overview. In the header, you specify a list of grantees who get\n the specific permission. To grant permissions explicitly, use:
\n\n x-amz-grant-read
\n
\n x-amz-grant-write
\n
\n x-amz-grant-read-acp
\n
\n x-amz-grant-write-acp
\n
\n x-amz-grant-full-control
\n
You specify each grantee as a type=value pair, where the type is one of\n the following:
\n\n id
– if the value specified is the canonical user ID\n of an Amazon Web Services account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email\n address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nFor example, the following x-amz-grant-read
header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:
\n x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"
\n
The following operations are related to CreateMultipartUpload
:
\n UploadPart\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nThis action initiates a multipart upload and returns an upload ID. This upload ID is\n used to associate all of the parts in the specific multipart upload. You specify this\n upload ID in each of your subsequent upload part requests (see UploadPart). You also include this\n upload ID in the final request to either complete or abort the multipart upload\n request.
\nFor more information about multipart uploads, see Multipart Upload Overview.
\nIf you have configured a lifecycle rule to abort incomplete multipart uploads, the\n upload must complete within the number of days specified in the bucket lifecycle\n configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort\n action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle\n Configuration.
\nFor information about the permissions required to use the multipart upload API, see\n Multipart\n Upload and Permissions.
\nFor request signing, multipart upload is just a series of regular requests. You initiate\n a multipart upload, send one or more requests to upload parts, and then complete the\n multipart upload process. You sign each request individually. There is nothing special\n about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4).
\nAfter you initiate a multipart upload and upload one or more parts, to stop being\n charged for storing the uploaded parts, you must either complete or abort the multipart\n upload. Amazon S3 frees up the space used to store the parts and stop charging you for\n storing them only after you either complete or abort a multipart upload.
\nServer-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it\n writes it to disks in its data centers and decrypts it when you access it. Amazon S3\n automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a\n multipart upload, if you don't specify encryption information in your request, the\n encryption setting of the uploaded parts is set to the default encryption configuration of\n the destination bucket. By default, all buckets have a base level of encryption\n configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the\n destination bucket has a default encryption configuration that uses server-side encryption\n with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C),\n Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded\n parts. When you perform a CreateMultipartUpload operation, if you want to use a different\n type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the\n object with a KMS key, an Amazon S3 managed key, or a customer-provided key. If the encryption\n setting in your request is different from the default encryption configuration of the\n destination bucket, the encryption setting in your request takes precedence. If you choose\n to provide your own encryption key, the request headers you provide in UploadPart\n and UploadPartCopy requests must match the headers you used in the request to\n initiate the upload by using CreateMultipartUpload
. You can request that Amazon S3\n save the uploaded parts encrypted with server-side encryption with an Amazon S3 managed key\n (SSE-S3), an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key\n (SSE-C).
To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester\n must have permission to the kms:Decrypt
and kms:GenerateDataKey*
\n actions on the key. These permissions are required because Amazon S3 must decrypt and read data\n from the encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API\n and permissions and Protecting data using\n server-side encryption with Amazon Web Services KMS in the\n Amazon S3 User Guide.
If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key,\n then you must have these permissions on the key policy. If your IAM user or role belongs\n to a different account than the key, then you must have the permissions on both the key\n policy and your IAM user or role.
\nFor more information, see Protecting Data Using Server-Side\n Encryption.
\nWhen copying an object, you can optionally specify the accounts or groups that\n should be granted specific permissions on the new object. There are two ways to\n grant the permissions using the request headers:
\nSpecify a canned ACL with the x-amz-acl
request header. For\n more information, see Canned\n ACL.
Specify access permissions explicitly with the\n x-amz-grant-read
, x-amz-grant-read-acp
,\n x-amz-grant-write-acp
, and\n x-amz-grant-full-control
headers. These parameters map to\n the set of permissions that Amazon S3 supports in an ACL. For more information,\n see Access Control List (ACL) Overview.
You can use either a canned ACL or specify access permissions explicitly. You\n cannot do both.
\nAmazon S3 encrypts data by using server-side encryption with an Amazon S3 managed key\n (SSE-S3) by default. Server-side encryption is for data encryption at rest. Amazon S3\n encrypts your data as it writes it to disks in its data centers and decrypts it\n when you access it. You can request that Amazon S3 encrypts data at rest by using\n server-side encryption with other key options. The option you use depends on\n whether you want to use KMS keys (SSE-KMS) or provide your own encryption keys\n (SSE-C).
\nUse KMS keys (SSE-KMS) that include the Amazon Web Services managed key\n (aws/s3
) and KMS customer managed keys stored in Key Management Service (KMS) –\n If you want Amazon Web Services to manage the keys used to encrypt data, specify the\n following headers in the request.
\n x-amz-server-side-encryption
\n
\n x-amz-server-side-encryption-aws-kms-key-id
\n
\n x-amz-server-side-encryption-context
\n
If you specify x-amz-server-side-encryption:aws:kms
, but\n don't provide x-amz-server-side-encryption-aws-kms-key-id
,\n Amazon S3 uses the Amazon Web Services managed key (aws/s3
key) in KMS to\n protect the data.
All GET
and PUT
requests for an object\n protected by KMS fail if you don't make them by using Secure Sockets\n Layer (SSL), Transport Layer Security (TLS), or Signature Version\n 4.
For more information about server-side encryption with KMS keys\n (SSE-KMS), see Protecting Data\n Using Server-Side Encryption with KMS keys.
\nUse customer-provided encryption keys (SSE-C) – If you want to manage\n your own encryption keys, provide all the following headers in the\n request.
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about server-side encryption with customer-provided\n encryption keys (SSE-C), see \n Protecting data using server-side encryption with customer-provided\n encryption keys (SSE-C).
\nYou also can use the following access control–related headers with this\n operation. By default, all objects are private. Only the owner has full access\n control. When adding a new object, you can grant permissions to individual\n Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then\n added to the access control list (ACL) on the object. For more information, see\n Using ACLs. With this operation, you can grant access permissions\n using one of the following two methods:
\nSpecify a canned ACL (x-amz-acl
) — Amazon S3 supports a set of\n predefined ACLs, known as canned ACLs. Each canned ACL\n has a predefined set of grantees and permissions. For more information, see\n Canned\n ACL.
Specify access permissions explicitly — To explicitly grant access\n permissions to specific Amazon Web Services accounts or groups, use the following headers.\n Each header maps to specific permissions that Amazon S3 supports in an ACL. For\n more information, see Access Control List (ACL)\n Overview. In the header, you specify a list of grantees who get\n the specific permission. To grant permissions explicitly, use:
\n\n x-amz-grant-read
\n
\n x-amz-grant-write
\n
\n x-amz-grant-read-acp
\n
\n x-amz-grant-write-acp
\n
\n x-amz-grant-full-control
\n
You specify each grantee as a type=value pair, where the type is one of\n the following:
\n\n id
– if the value specified is the canonical user ID\n of an Amazon Web Services account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email\n address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nFor example, the following x-amz-grant-read
header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:
\n x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"
\n
The following operations are related to CreateMultipartUpload
:
\n UploadPart\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nIf the bucket has a lifecycle rule configured with an action to abort incomplete\n multipart uploads and the prefix in the lifecycle rule matches the object name in the\n request, the response includes this header. The header indicates when the initiated\n multipart upload becomes eligible for an abort operation. For more information, see \n Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration.
\nThe response also includes the x-amz-abort-rule-id
header that provides the\n ID of the lifecycle configuration rule that defines this action.
If the bucket has a lifecycle rule configured with an action to abort incomplete\n multipart uploads and the prefix in the lifecycle rule matches the object name in the\n request, the response includes this header. The header indicates when the initiated\n multipart upload becomes eligible for an abort operation. For more information, see \n Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle\n Configuration.
\nThe response also includes the x-amz-abort-rule-id
header that provides the\n ID of the lifecycle configuration rule that defines this action.
Specifies the ID of the symmetric encryption customer managed key to use for object encryption.\n All GET and PUT requests for an object protected by KMS will fail if they're not made via\n SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services\n SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication\n in the Amazon S3 User Guide.
", + "smithy.api#documentation": "Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption customer managed key to use for object encryption.\n All GET and PUT requests for an object protected by KMS will fail if they're not made via\n SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services\n SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication\n in the Amazon S3 User Guide.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -17983,7 +17983,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "This implementation of the DELETE action resets the default encryption for the\n bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). For information about the\n bucket default encryption feature, see Amazon S3 Bucket Default Encryption\n in the Amazon S3 User Guide.
\nTo use this operation, you must have permissions to perform the\n s3:PutEncryptionConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to your Amazon S3 Resources in the\n Amazon S3 User Guide.
The following operations are related to DeleteBucketEncryption
:
\n PutBucketEncryption\n
\n\n GetBucketEncryption\n
\nThis implementation of the DELETE action resets the default encryption for the bucket as\n server-side encryption with Amazon S3 managed keys (SSE-S3). For information about the bucket\n default encryption feature, see Amazon S3 Bucket Default Encryption\n in the Amazon S3 User Guide.
\nTo use this operation, you must have permissions to perform the\n s3:PutEncryptionConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to your Amazon S3 Resources in the\n Amazon S3 User Guide.
The following operations are related to DeleteBucketEncryption
:
\n PutBucketEncryption\n
\n\n GetBucketEncryption\n
\nThis implementation of the DELETE action uses the policy subresource to delete the\n policy of a specified bucket. If you are using an identity other than the root user of the\n Amazon Web Services account that owns the bucket, the calling identity must have the\n DeleteBucketPolicy
permissions on the specified bucket and belong to the\n bucket owner's account to use this operation.
If you don't have DeleteBucketPolicy
permissions, Amazon S3 returns a 403\n Access Denied
error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own\n buckets, the root principal in a bucket owner's Amazon Web Services account can perform the\n GetBucketPolicy
, PutBucketPolicy
, and\n DeleteBucketPolicy
API actions, even if their bucket policy explicitly\n denies the root principal's access. Bucket owner root principals can only be blocked from performing \n these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
For more information about bucket policies, see Using Bucket Policies and\n UserPolicies.
\nThe following operations are related to DeleteBucketPolicy
\n
\n CreateBucket\n
\n\n DeleteObject\n
\nThis implementation of the DELETE action uses the policy subresource to delete the\n policy of a specified bucket. If you are using an identity other than the root user of the\n Amazon Web Services account that owns the bucket, the calling identity must have the\n DeleteBucketPolicy
permissions on the specified bucket and belong to the\n bucket owner's account to use this operation.
If you don't have DeleteBucketPolicy
permissions, Amazon S3 returns a 403\n Access Denied
error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own\n buckets, the root principal in a bucket owner's Amazon Web Services account can perform the\n GetBucketPolicy
, PutBucketPolicy
, and\n DeleteBucketPolicy
API actions, even if their bucket policy explicitly\n denies the root principal's access. Bucket owner root principals can only be blocked\n from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations\n policies.
For more information about bucket policies, see Using Bucket Policies and\n UserPolicies.
\nThe following operations are related to DeleteBucketPolicy
\n
\n CreateBucket\n
\n\n DeleteObject\n
\nRemoves the null version (if there is one) of an object and inserts a delete marker,\n which becomes the latest version of the object. If there isn't a null version, Amazon S3 does\n not remove any objects but will still respond that the command was successful.
\nTo remove a specific version, you must use the version Id subresource. Using this\n subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3\n sets the response header, x-amz-delete-marker
, to true.
If the object you want to delete is in a bucket where the bucket versioning\n configuration is MFA Delete enabled, you must include the x-amz-mfa
request\n header in the DELETE versionId
request. Requests that include\n x-amz-mfa
must use HTTPS.
For more information about MFA Delete, see Using MFA Delete. To see sample\n requests that use versioning, see Sample\n Request.
\nYou can delete objects by explicitly calling DELETE Object or configure its lifecycle\n (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block\n users or accounts from removing or deleting objects from your bucket, you must deny them\n the s3:DeleteObject
, s3:DeleteObjectVersion
, and\n s3:PutLifeCycleConfiguration
actions.
The following action is related to DeleteObject
:
\n PutObject\n
\nSpecifies whether the versioned object that was permanently deleted was (true) or was\n not (false) a delete marker.
", + "smithy.api#documentation": "Indicates whether the specified object version that was permanently deleted was (true) or was\n not (false) a delete marker before deletion. In a simple DELETE, this header indicates whether (true) or\n not (false) the current version of the object is a delete marker.
", "smithy.api#httpHeader": "x-amz-delete-marker" } }, @@ -18796,7 +18795,41 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "This action enables you to delete multiple objects from a bucket using a single HTTP\n request. If you know the object keys that you want to delete, then this action provides a\n suitable alternative to sending individual delete requests, reducing per-request\n overhead.
\nThe request contains a list of up to 1000 keys that you want to delete. In the XML, you\n provide the object key names, and optionally, version IDs if you want to delete a specific\n version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a\n delete action and returns the result of that delete, success, or failure, in the response.\n Note that if the object specified in the request is not found, Amazon S3 returns the result as\n deleted.
\nThe action supports two modes for the response: verbose and quiet. By default, the\n action uses verbose mode in which the response includes the result of deletion of each key\n in your request. In quiet mode the response includes only keys where the delete action\n encountered an error. For a successful deletion, the action does not return any information\n about the delete in the response body.
\nWhen performing this action on an MFA Delete enabled bucket, that attempts to delete any\n versioned objects, you must include an MFA token. If you do not provide one, the entire\n request will fail, even if there are non-versioned objects you are trying to delete. If you\n provide an invalid token, whether there are versioned keys in the request or not, the\n entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA\n Delete.
\nFinally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in\n transit.
\nThe following operations are related to DeleteObjects
:
\n UploadPart\n
\n\n ListParts\n
\n\n AbortMultipartUpload\n
\nThis action enables you to delete multiple objects from a bucket using a single HTTP\n request. If you know the object keys that you want to delete, then this action provides a\n suitable alternative to sending individual delete requests, reducing per-request\n overhead.
\nThe request contains a list of up to 1000 keys that you want to delete. In the XML, you\n provide the object key names, and optionally, version IDs if you want to delete a specific\n version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a\n delete action and returns the result of that delete, success, or failure, in the response.\n Note that if the object specified in the request is not found, Amazon S3 returns the result as\n deleted.
\nThe action supports two modes for the response: verbose and quiet. By default, the\n action uses verbose mode in which the response includes the result of deletion of each key\n in your request. In quiet mode the response includes only keys where the delete action\n encountered an error. For a successful deletion, the action does not return any information\n about the delete in the response body.
\nWhen performing this action on an MFA Delete enabled bucket, that attempts to delete any\n versioned objects, you must include an MFA token. If you do not provide one, the entire\n request will fail, even if there are non-versioned objects you are trying to delete. If you\n provide an invalid token, whether there are versioned keys in the request or not, the\n entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA\n Delete.
\nFinally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3\n uses the header value to ensure that your request body has not been altered in\n transit.
\nThe following operations are related to DeleteObjects
:
\n UploadPart\n
\n\n ListParts\n
\n\n AbortMultipartUpload\n
\nSpecifies whether the versioned object that was permanently deleted was (true) or was\n not (false) a delete marker. In a simple DELETE, this header indicates whether (true) or\n not (false) a delete marker was created.
" + "smithy.api#documentation": "Indicates whether the specified object version that was permanently deleted was (true) or was\n not (false) a delete marker before deletion. In a simple DELETE, this header indicates whether (true) or\n not (false) the current version of the object is a delete marker.
" } }, "DeleteMarkerVersionId": { @@ -19535,7 +19568,7 @@ "target": "com.amazonaws.s3#GetBucketAccelerateConfigurationOutput" }, "traits": { - "smithy.api#documentation": "This implementation of the GET action uses the accelerate
subresource to\n return the Transfer Acceleration state of a bucket, which is either Enabled
or\n Suspended
. Amazon S3 Transfer Acceleration is a bucket-level feature that\n enables you to perform faster data transfers to and from Amazon S3.
To use this operation, you must have permission to perform the\n s3:GetAccelerateConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to your Amazon S3 Resources in the\n Amazon S3 User Guide.
You set the Transfer Acceleration state of an existing bucket to Enabled
or\n Suspended
by using the PutBucketAccelerateConfiguration operation.
A GET accelerate
request does not return a state value for a bucket that\n has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state\n has never been set on the bucket.
For more information about transfer acceleration, see Transfer Acceleration in\n the Amazon S3 User Guide.
\nThe following operations are related to GetBucketAccelerateConfiguration
:
This implementation of the GET action uses the accelerate
subresource to\n return the Transfer Acceleration state of a bucket, which is either Enabled
or\n Suspended
. Amazon S3 Transfer Acceleration is a bucket-level feature that\n enables you to perform faster data transfers to and from Amazon S3.
To use this operation, you must have permission to perform the\n s3:GetAccelerateConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to your Amazon S3 Resources in the\n Amazon S3 User Guide.
You set the Transfer Acceleration state of an existing bucket to Enabled
or\n Suspended
by using the PutBucketAccelerateConfiguration operation.
A GET accelerate
request does not return a state value for a bucket that\n has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state\n has never been set on the bucket.
For more information about transfer acceleration, see Transfer Acceleration in\n the Amazon S3 User Guide.
\nThe following operations are related to\n GetBucketAccelerateConfiguration
:
This implementation of the GET action returns an analytics configuration (identified by\n the analytics configuration ID) from the bucket.
\nTo use this operation, you must have permissions to perform the\n s3:GetAnalyticsConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.
For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class\n Analysis in the Amazon S3 User Guide.
\nThe following operations are related to GetBucketAnalyticsConfiguration
:
This implementation of the GET action returns an analytics configuration (identified by\n the analytics configuration ID) from the bucket.
\nTo use this operation, you must have permissions to perform the\n s3:GetAnalyticsConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.
For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class\n Analysis in the Amazon S3 User Guide.
\nThe following operations are related to\n GetBucketAnalyticsConfiguration
:
Returns the policy of a specified bucket. If you are using an identity other than the\n root user of the Amazon Web Services account that owns the bucket, the calling identity must have the\n GetBucketPolicy
permissions on the specified bucket and belong to the\n bucket owner's account in order to use this operation.
If you don't have GetBucketPolicy
permissions, Amazon S3 returns a 403\n Access Denied
error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own\n buckets, the root principal in a bucket owner's Amazon Web Services account can perform the\n GetBucketPolicy
, PutBucketPolicy
, and\n DeleteBucketPolicy
API actions, even if their bucket policy explicitly\n denies the root principal's access. Bucket owner root principals can only be blocked from performing \n these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
To use this API operation against an access point, provide the alias of the access point in place of the bucket name.
\nTo use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. \nFor more information about InvalidAccessPointAliasError
, see List of\n Error Codes.
For more information about bucket policies, see Using Bucket Policies and User\n Policies.
\nThe following action is related to GetBucketPolicy
:
\n GetObject\n
\nReturns the policy of a specified bucket. If you are using an identity other than the\n root user of the Amazon Web Services account that owns the bucket, the calling identity must have the\n GetBucketPolicy
permissions on the specified bucket and belong to the\n bucket owner's account in order to use this operation.
If you don't have GetBucketPolicy
permissions, Amazon S3 returns a 403\n Access Denied
error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own\n buckets, the root principal in a bucket owner's Amazon Web Services account can perform the\n GetBucketPolicy
, PutBucketPolicy
, and\n DeleteBucketPolicy
API actions, even if their bucket policy explicitly\n denies the root principal's access. Bucket owner root principals can only be blocked\n from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations\n policies.
To use this API operation against an access point, provide the alias of the access point in place of the bucket name.
\nTo use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. \nFor more information about InvalidAccessPointAliasError
, see List of\n Error Codes.
For more information about bucket policies, see Using Bucket Policies and User\n Policies.
\nThe following action is related to GetBucketPolicy
:
\n GetObject\n
\nRetrieves objects from Amazon S3. To use GET
, you must have READ
\n access to the object. If you grant READ
access to the anonymous user, you can\n return the object without using an authorization header.
An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer\n file system. You can, however, create a logical hierarchy by using object key names that\n imply a folder structure. For example, instead of naming an object sample.jpg
,\n you can name it photos/2006/February/sample.jpg
.
To get an object from such a logical hierarchy, specify the full key name for the object\n in the GET
operation. For a virtual hosted-style request example, if you have\n the object photos/2006/February/sample.jpg
, specify the resource as\n /photos/2006/February/sample.jpg
. For a path-style request example, if you\n have the object photos/2006/February/sample.jpg
in the bucket named\n examplebucket
, specify the resource as\n /examplebucket/photos/2006/February/sample.jpg
. For more information about\n request types, see HTTP Host\n Header Bucket Specification.
For more information about returning the ACL of an object, see GetObjectAcl.
\nIf the object you are retrieving is stored in the S3 Glacier Flexible Retrieval or\n S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a\n copy using RestoreObject. Otherwise, this action returns an\n InvalidObjectState
error. For information about restoring archived objects,\n see Restoring\n Archived Objects.
Encryption request headers, like x-amz-server-side-encryption
, should not\n be sent for GET requests if your object uses server-side encryption with Key Management Service (KMS)\n keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or\n server-side encryption with Amazon S3 managed encryption keys (SSE-S3). If your object does use\n these types of keys, you’ll get an HTTP 400 Bad Request error.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object,\n you must use the following headers:
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys).
\nAssuming you have the relevant permission to read object tags, the response also returns\n the x-amz-tagging-count
header that provides the count of number of tags\n associated with the object. You can use GetObjectTagging to retrieve\n the tag set associated with an object.
You need the relevant read object (or version) permission for this operation. For more\n information, see Specifying Permissions in a\n Policy. If the object that you request doesn’t exist, the error that Amazon S3 returns depends\n on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3\n returns an HTTP status code 404 (Not Found) error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an\n HTTP status code 403 (\"access denied\") error.
By default, the GET
action returns the current version of an object. To return a\n different version, use the versionId
subresource.
If you supply a versionId
, you need the\n s3:GetObjectVersion
permission to access a specific version of an\n object. If you request a specific version, you do not need to have the\n s3:GetObject
permission. If you request the current version\n without a specific version ID, only s3:GetObject
permission is\n required. s3:GetObjectVersion
permission won't be required.
If the current version of the object is a delete marker, Amazon S3 behaves as if the\n object was deleted and includes x-amz-delete-marker: true
in the\n response.
For more information about versioning, see PutBucketVersioning.
\nThere are times when you want to override certain response header values in a GET
\n response. For example, you might override the Content-Disposition
response\n header value in your GET
request.
You can override values for a set of response headers using the following query\n parameters. These response header values are sent only on a successful request, that is,\n when status code 200 OK is returned. The set of headers you can override using these\n parameters is a subset of the headers that Amazon S3 accepts when you create an object. The\n response headers that you can override for the GET
response are Content-Type
,\n Content-Language
, Expires
, Cache-Control
,\n Content-Disposition
, and Content-Encoding
. To override these\n header values in the GET
response, you use the following request parameters.
You must sign the request, either using an Authorization header or a presigned URL,\n when using these parameters. They cannot be used with an unsigned (anonymous)\n request.
\n\n response-content-type
\n
\n response-content-language
\n
\n response-expires
\n
\n response-cache-control
\n
\n response-content-disposition
\n
\n response-content-encoding
\n
If both of the If-Match
and If-Unmodified-Since
headers are\n present in the request as follows: If-Match
condition evaluates to\n true
, and; If-Unmodified-Since
condition evaluates to\n false
; then, S3 returns 200 OK and the data requested.
If both of the If-None-Match
and If-Modified-Since
headers are\n present in the request as follows: If-None-Match
condition evaluates to\n false
, and; If-Modified-Since
condition evaluates to\n true
; then, S3 returns 304 Not Modified response code.
For more information about conditional requests, see RFC 7232.
\nThe following operations are related to GetObject
:
\n ListBuckets\n
\n\n GetObjectAcl\n
\nRetrieves objects from Amazon S3. To use GET
, you must have READ
\n access to the object. If you grant READ
access to the anonymous user, you can\n return the object without using an authorization header.
An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer\n file system. You can, however, create a logical hierarchy by using object key names that\n imply a folder structure. For example, instead of naming an object sample.jpg
,\n you can name it photos/2006/February/sample.jpg
.
To get an object from such a logical hierarchy, specify the full key name for the object\n in the GET
operation. For a virtual hosted-style request example, if you have\n the object photos/2006/February/sample.jpg
, specify the resource as\n /photos/2006/February/sample.jpg
. For a path-style request example, if you\n have the object photos/2006/February/sample.jpg
in the bucket named\n examplebucket
, specify the resource as\n /examplebucket/photos/2006/February/sample.jpg
. For more information about\n request types, see HTTP Host\n Header Bucket Specification.
For more information about returning the ACL of an object, see GetObjectAcl.
\nIf the object you are retrieving is stored in the S3 Glacier Flexible Retrieval or\n S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a\n copy using RestoreObject. Otherwise, this action returns an\n InvalidObjectState
error. For information about restoring archived objects,\n see Restoring\n Archived Objects.
Encryption request headers, like x-amz-server-side-encryption
, should not\n be sent for GET requests if your object uses server-side encryption with Key Management Service (KMS)\n keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or\n server-side encryption with Amazon S3 managed encryption keys (SSE-S3). If your object does use\n these types of keys, you’ll get an HTTP 400 Bad Request error.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object,\n you must use the following headers:
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys).
\nAssuming you have the relevant permission to read object tags, the response also returns\n the x-amz-tagging-count
header that provides the count of number of tags\n associated with the object. You can use GetObjectTagging to retrieve\n the tag set associated with an object.
You need the relevant read object (or version) permission for this operation.\n For more information, see Specifying Permissions in\n a Policy. If the object that you request doesn’t exist, the error that\n Amazon S3 returns depends on whether you also have the s3:ListBucket
\n permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3\n returns an HTTP status code 404 (Not Found) error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an\n HTTP status code 403 (\"access denied\") error.
By default, the GET
action returns the current version of an\n object. To return a different version, use the versionId
\n subresource.
If you supply a versionId
, you need the\n s3:GetObjectVersion
permission to access a specific\n version of an object. If you request a specific version, you do not need\n to have the s3:GetObject
permission. If you request the\n current version without a specific version ID, only\n s3:GetObject
permission is required.\n s3:GetObjectVersion
permission won't be required.
If the current version of the object is a delete marker, Amazon S3 behaves\n as if the object was deleted and includes x-amz-delete-marker:\n true
in the response.
For more information about versioning, see PutBucketVersioning.
\nThere are times when you want to override certain response header values in a\n GET
response. For example, you might override the\n Content-Disposition
response header value in your GET
\n request.
You can override values for a set of response headers using the following query\n parameters. These response header values are sent only on a successful request,\n that is, when status code 200 OK is returned. The set of headers you can override\n using these parameters is a subset of the headers that Amazon S3 accepts when you\n create an object. The response headers that you can override for the\n GET
response are Content-Type
,\n Content-Language
, Expires
,\n Cache-Control
, Content-Disposition
, and\n Content-Encoding
. To override these header values in the\n GET
response, you use the following request parameters.
You must sign the request, either using an Authorization header or a\n presigned URL, when using these parameters. They cannot be used with an\n unsigned (anonymous) request.
\n\n response-content-type
\n
\n response-content-language
\n
\n response-expires
\n
\n response-cache-control
\n
\n response-content-disposition
\n
\n response-content-encoding
\n
If both of the If-Match
and If-Unmodified-Since
\n headers are present in the request as follows: If-Match
condition\n evaluates to true
, and; If-Unmodified-Since
condition\n evaluates to false
; then, S3 returns 200 OK and the data requested.
If both of the If-None-Match
and If-Modified-Since
\n headers are present in the request as follows: If-None-Match
\n condition evaluates to false
, and; If-Modified-Since
\n condition evaluates to true
; then, S3 returns 304 Not Modified\n response code.
For more information about conditional requests, see RFC 7232.
\nThe following operations are related to GetObject
:
\n ListBuckets\n
\n\n GetObjectAcl\n
\nRetrieves all the metadata from an object without returning the object itself. This\n action is useful if you're interested only in an object's metadata. To use\n GetObjectAttributes
, you must have READ access to the object.
\n GetObjectAttributes
combines the functionality of HeadObject
\n and ListParts
. All of the data returned with each of those individual calls\n can be returned with a single call to GetObjectAttributes
.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers:
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the\n Amazon S3 User Guide.
\nEncryption request headers, such as x-amz-server-side-encryption
,\n should not be sent for GET requests if your object uses server-side encryption\n with Amazon Web Services KMS keys stored in Amazon Web Services Key Management Service (SSE-KMS) or\n server-side encryption with Amazon S3 managed keys (SSE-S3). If your object does use\n these types of keys, you'll get an HTTP 400 Bad Request
error.
The last modified property in this case is the creation date of the\n object.
\nConsider the following when using request headers:
\n If both of the If-Match
and If-Unmodified-Since
headers\n are present in the request as follows, then Amazon S3 returns the HTTP status code\n 200 OK
and the data requested:
\n If-Match
condition evaluates to true
.
\n If-Unmodified-Since
condition evaluates to\n false
.
If both of the If-None-Match
and If-Modified-Since
\n headers are present in the request as follows, then Amazon S3 returns the HTTP status code\n 304 Not Modified
:
\n If-None-Match
condition evaluates to false
.
\n If-Modified-Since
condition evaluates to\n true
.
For more information about conditional requests, see RFC 7232.
\nThe permissions that you need to use this operation depend on whether the bucket is\n versioned. If the bucket is versioned, you need both the s3:GetObjectVersion
\n and s3:GetObjectVersionAttributes
permissions for this operation. If the\n bucket is not versioned, you need the s3:GetObject
and\n s3:GetObjectAttributes
permissions. For more information, see Specifying\n Permissions in a Policy in the Amazon S3 User Guide. If the\n object that you request does not exist, the error Amazon S3 returns depends on whether you also\n have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns\n an HTTP status code 404 Not Found
(\"no such key\") error.
If you don't have the s3:ListBucket
permission, Amazon S3 returns an HTTP\n status code 403 Forbidden
(\"access denied\") error.
The following actions are related to GetObjectAttributes
:
\n GetObject\n
\n\n GetObjectAcl\n
\n\n GetObjectLegalHold\n
\n\n GetObjectRetention\n
\n\n GetObjectTagging\n
\n\n HeadObject\n
\n\n ListParts\n
\nRetrieves all the metadata from an object without returning the object itself. This\n action is useful if you're interested only in an object's metadata. To use\n GetObjectAttributes
, you must have READ access to the object.
\n GetObjectAttributes
combines the functionality of HeadObject
\n and ListParts
. All of the data returned with each of those individual calls\n can be returned with a single call to GetObjectAttributes
.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers:
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the\n Amazon S3 User Guide.
\nEncryption request headers, such as x-amz-server-side-encryption
,\n should not be sent for GET requests if your object uses server-side encryption\n with Amazon Web Services KMS keys stored in Amazon Web Services Key Management Service (SSE-KMS) or\n server-side encryption with Amazon S3 managed keys (SSE-S3). If your object does use\n these types of keys, you'll get an HTTP 400 Bad Request
error.
The last modified property in this case is the creation date of the\n object.
\nConsider the following when using request headers:
\n If both of the If-Match
and If-Unmodified-Since
headers\n are present in the request as follows, then Amazon S3 returns the HTTP status code\n 200 OK
and the data requested:
\n If-Match
condition evaluates to true
.
\n If-Unmodified-Since
condition evaluates to\n false
.
If both of the If-None-Match
and If-Modified-Since
\n headers are present in the request as follows, then Amazon S3 returns the HTTP status code\n 304 Not Modified
:
\n If-None-Match
condition evaluates to false
.
\n If-Modified-Since
condition evaluates to\n true
.
For more information about conditional requests, see RFC 7232.
\nThe permissions that you need to use this operation depend on whether the\n bucket is versioned. If the bucket is versioned, you need both the\n s3:GetObjectVersion
and s3:GetObjectVersionAttributes
\n permissions for this operation. If the bucket is not versioned, you need the\n s3:GetObject
and s3:GetObjectAttributes
permissions.\n For more information, see Specifying Permissions in\n a Policy in the Amazon S3 User Guide. If the object\n that you request does not exist, the error Amazon S3 returns depends on whether you\n also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3\n returns an HTTP status code 404 Not Found
(\"no such key\")\n error.
If you don't have the s3:ListBucket
permission, Amazon S3 returns\n an HTTP status code 403 Forbidden
(\"access denied\")\n error.
The following actions are related to GetObjectAttributes
:
\n GetObject\n
\n\n GetObjectAcl\n
\n\n GetObjectLegalHold\n
\n\n GetObjectRetention\n
\n\n GetObjectTagging\n
\n\n HeadObject\n
\n\n ListParts\n
\nSpecifies the fields at the root level that you want returned in the\n response. Fields that you do not specify are not returned.
", + "smithy.api#documentation": "Specifies the fields at the root level that you want returned in the response. Fields\n that you do not specify are not returned.
", "smithy.api#httpHeader": "x-amz-object-attributes", "smithy.api#required": {} } @@ -21972,19 +22005,22 @@ "smithy.api#documentation": "Returns the tag-set of an object. You send the GET request against the tagging\n subresource associated with the object.
\nTo use this operation, you must have permission to perform the\n s3:GetObjectTagging
action. By default, the GET action returns information\n about current version of an object. For a versioned bucket, you can have multiple versions\n of an object in your bucket. To retrieve tags of any other version, use the versionId query\n parameter. You also need permission for the s3:GetObjectVersionTagging
\n action.
By default, the bucket owner has this permission and can grant this permission to\n others.
\nFor information about the Amazon S3 object tagging feature, see Object Tagging.
\nThe following actions are related to GetObjectTagging
:
\n DeleteObjectTagging\n
\n\n GetObjectAttributes\n
\n\n PutObjectTagging\n
\nThis action is useful to determine if a bucket exists and you have permission to access\n it. The action returns a 200 OK
if the bucket exists and you have permission\n to access it.
If the bucket does not exist or you do not have permission to access it, the\n HEAD
request returns a generic 400 Bad Request
, 403\n Forbidden
or 404 Not Found
code. A message body is not included, so\n you cannot determine the exception beyond these error codes.
To use this operation, you must have permissions to perform the\n s3:ListBucket
action. The bucket owner has this permission by default and\n can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.
To use this API operation against an access point, you must provide the alias of the access point in place of the\n bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to\n the access point hostname. The access point hostname takes the form\n AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.\n When using the Amazon Web Services SDKs, you provide the ARN in place of the bucket name. For more\n information, see Using access points.
\nTo use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. \nFor more information about InvalidAccessPointAliasError
, see List of\n Error Codes.
This action is useful to determine if a bucket exists and you have permission to access\n it. The action returns a 200 OK
if the bucket exists and you have permission\n to access it.
If the bucket does not exist or you do not have permission to access it, the\n HEAD
request returns a generic 400 Bad Request
, 403\n Forbidden
or 404 Not Found
code. A message body is not included, so\n you cannot determine the exception beyond these error codes.
To use this operation, you must have permissions to perform the\n s3:ListBucket
action. The bucket owner has this permission by default and\n can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.
To use this API operation against an access point, you must provide the alias of the access point in\n place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct\n requests to the access point hostname. The access point hostname takes the form\n AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.\n When using the Amazon Web Services SDKs, you provide the ARN in place of the bucket name. For more\n information, see Using access points.
\nTo use this API operation against an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \nIf the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. \nFor more information about InvalidAccessPointAliasError
, see List of\n Error Codes.
The bucket name.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen you use this action with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. \n If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError
is returned. \n For more information about InvalidAccessPointAliasError
, see List of\n Error Codes.
When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The bucket name.
\nWhen using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
\nWhen you use this action with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the\n bucket name. If the Object Lambda access point alias in a request is not valid, the error code\n InvalidAccessPointAliasError
is returned. For more information about\n InvalidAccessPointAliasError
, see List of Error\n Codes.
When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form \n AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide.
The HEAD
action retrieves metadata from an object without returning the object itself.\n This action is useful if you're only interested in an object's metadata. To use HEAD
, you\n must have READ access to the object.
A HEAD
request has the same options as a GET
action on an\n object. The response is identical to the GET
response except that there is no\n response body. Because of this, if the HEAD
request generates an error, it\n returns a generic 400 Bad Request
, 403 Forbidden
or 404 Not\n Found
code. It is not possible to retrieve the exact exception beyond these error\n codes.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers:
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys).
\nEncryption request headers, like x-amz-server-side-encryption
,\n should not be sent for GET
requests if your object uses server-side\n encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side\n encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3\n managed encryption keys (SSE-S3). If your object does use these types of keys,\n you’ll get an HTTP 400 Bad Request error.
The last modified property in this case is the creation date of the\n object.
\nRequest headers are limited to 8 KB in size. For more information, see Common\n Request Headers.
\nConsider the following when using request headers:
\n Consideration 1 – If both of the If-Match
and\n If-Unmodified-Since
headers are present in the request as\n follows:
\n If-Match
condition evaluates to true
, and;
\n If-Unmodified-Since
condition evaluates to\n false
;
Then Amazon S3 returns 200 OK
and the data requested.
Consideration 2 – If both of the If-None-Match
and\n If-Modified-Since
headers are present in the request as\n follows:
\n If-None-Match
condition evaluates to false
,\n and;
\n If-Modified-Since
condition evaluates to\n true
;
Then Amazon S3 returns the 304 Not Modified
response code.
For more information about conditional requests, see RFC 7232.
\nYou need the relevant read object (or version) permission for this operation. For more\n information, see Actions, resources, and condition keys for Amazon S3. \n If the object you request doesn't exist, the error that Amazon S3 returns depends\n on whether you also have the s3:ListBucket permission.
\nIf you have the s3:ListBucket
permission on the bucket, Amazon S3 returns\n an HTTP status code 404 error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP\n status code 403 error.
The following actions are related to HeadObject
:
\n GetObject\n
\n\n GetObjectAttributes\n
\nThe HEAD
action retrieves metadata from an object without returning the\n object itself. This action is useful if you're only interested in an object's metadata. To\n use HEAD
, you must have READ access to the object.
A HEAD
request has the same options as a GET
action on an\n object. The response is identical to the GET
response except that there is no\n response body. Because of this, if the HEAD
request generates an error, it\n returns a generic 400 Bad Request
, 403 Forbidden
or 404 Not\n Found
code. It is not possible to retrieve the exact exception beyond these error\n codes.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers:
\n\n x-amz-server-side-encryption-customer-algorithm
\n
\n x-amz-server-side-encryption-customer-key
\n
\n x-amz-server-side-encryption-customer-key-MD5
\n
For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys).
\nEncryption request headers, like x-amz-server-side-encryption
,\n should not be sent for GET
requests if your object uses server-side\n encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side\n encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3\n managed encryption keys (SSE-S3). If your object does use these types of keys,\n you’ll get an HTTP 400 Bad Request error.
The last modified property in this case is the creation date of the\n object.
\nRequest headers are limited to 8 KB in size. For more information, see Common\n Request Headers.
\nConsider the following when using request headers:
\n Consideration 1 – If both of the If-Match
and\n If-Unmodified-Since
headers are present in the request as\n follows:
\n If-Match
condition evaluates to true
, and;
\n If-Unmodified-Since
condition evaluates to\n false
;
Then Amazon S3 returns 200 OK
and the data requested.
Consideration 2 – If both of the If-None-Match
and\n If-Modified-Since
headers are present in the request as\n follows:
\n If-None-Match
condition evaluates to false
,\n and;
\n If-Modified-Since
condition evaluates to\n true
;
Then Amazon S3 returns the 304 Not Modified
response code.
For more information about conditional requests, see RFC 7232.
\nYou need the relevant read object (or version) permission for this operation.\n For more information, see Actions, resources, and condition\n keys for Amazon S3. If the object you request doesn't exist, the error that\n Amazon S3 returns depends on whether you also have the s3:ListBucket permission.
\nIf you have the s3:ListBucket
permission on the bucket, Amazon S3\n returns an HTTP status code 404 error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns\n an HTTP status code 403 error.
The following actions are related to HeadObject
:
\n GetObject\n
\n\n GetObjectAttributes\n
\nIndicates at what date the object is to be moved or deleted. The date value must conform to the ISO 8601 format. \n The time is always midnight UTC.
" + "smithy.api#documentation": "Indicates at what date the object is to be moved or deleted. The date value must conform\n to the ISO 8601 format. The time is always midnight UTC.
" } }, "Days": { @@ -24109,6 +24145,47 @@ }, "traits": { "smithy.api#documentation": "This action lists in-progress multipart uploads. An in-progress multipart upload is a\n multipart upload that has been initiated using the Initiate Multipart Upload request, but\n has not yet been completed or aborted.
\nThis action returns at most 1,000 multipart uploads in the response. 1,000 multipart\n uploads is the maximum number of uploads a response can include, which is also the default\n value. You can further limit the number of uploads in a response by specifying the\n max-uploads
parameter in the response. If additional multipart uploads\n satisfy the list criteria, the response will contain an IsTruncated
element\n with the value true. To list the additional multipart uploads, use the\n key-marker
and upload-id-marker
request parameters.
In the response, the uploads are sorted by key. If your application has initiated more\n than one multipart upload using the same object key, then uploads in the response are first\n sorted by key. Additionally, uploads are sorted in ascending order within each key by the\n upload initiation time.
\nFor more information on multipart uploads, see Uploading Objects Using Multipart\n Upload.
\nFor information on permissions required to use the multipart upload API, see Multipart Upload\n and Permissions.
\nThe following operations are related to ListMultipartUploads
:
\n UploadPart\n
\n\n ListParts\n
\n\n AbortMultipartUpload\n
\nSpecifies the optional fields that you want returned in the response.\n Fields that you do not specify are not returned.
", + "smithy.api#documentation": "Specifies the optional fields that you want returned in the response. Fields that you do\n not specify are not returned.
", "smithy.api#httpHeader": "x-amz-optional-object-attributes" } } @@ -24651,7 +24728,7 @@ "Marker": { "target": "com.amazonaws.s3#Marker", "traits": { - "smithy.api#documentation": "Marker is where you want Amazon S3 to start listing from. Amazon S3 starts listing after\n this specified key. Marker can be any key in the bucket.
", + "smithy.api#documentation": "Marker is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this\n specified key. Marker can be any key in the bucket.
", "smithy.api#httpQuery": "marker" } }, @@ -24659,7 +24736,7 @@ "target": "com.amazonaws.s3#MaxKeys", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "Sets the maximum number of keys returned in the response. By default, the action returns\n up to 1,000 key names. The response might contain fewer keys but will never contain more.
", + "smithy.api#documentation": "Sets the maximum number of keys returned in the response. By default, the action returns\n up to 1,000 key names. The response might contain fewer keys but will never contain more.\n
", "smithy.api#httpQuery": "max-keys" } }, @@ -24687,7 +24764,7 @@ "OptionalObjectAttributes": { "target": "com.amazonaws.s3#OptionalObjectAttributesList", "traits": { - "smithy.api#documentation": "Specifies the optional fields that you want returned in the response.\n Fields that you do not specify are not returned.
", + "smithy.api#documentation": "Specifies the optional fields that you want returned in the response. Fields that you do\n not specify are not returned.
", "smithy.api#httpHeader": "x-amz-optional-object-attributes" } } @@ -24897,7 +24974,7 @@ "OptionalObjectAttributes": { "target": "com.amazonaws.s3#OptionalObjectAttributesList", "traits": { - "smithy.api#documentation": "Specifies the optional fields that you want returned in the response.\n Fields that you do not specify are not returned.
", + "smithy.api#documentation": "Specifies the optional fields that you want returned in the response. Fields that you do\n not specify are not returned.
", "smithy.api#httpHeader": "x-amz-optional-object-attributes" } } @@ -24935,7 +25012,7 @@ "AbortDate": { "target": "com.amazonaws.s3#AbortDate", "traits": { - "smithy.api#documentation": "If the bucket has a lifecycle rule configured with an action to abort incomplete\n multipart uploads and the prefix in the lifecycle rule matches the object name in the\n request, then the response includes this header indicating when the initiated multipart\n upload will become eligible for abort operation. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration.
\nThe response will also include the x-amz-abort-rule-id
header that will\n provide the ID of the lifecycle configuration rule that defines this action.
If the bucket has a lifecycle rule configured with an action to abort incomplete\n multipart uploads and the prefix in the lifecycle rule matches the object name in the\n request, then the response includes this header indicating when the initiated multipart\n upload will become eligible for abort operation. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle\n Configuration.
\nThe response will also include the x-amz-abort-rule-id
header that will\n provide the ID of the lifecycle configuration rule that defines this action.
Specifies object key name filtering rules. For information about key name filtering, see\n Configuring event notifications using object key name filtering in the Amazon S3 User Guide.
" + "smithy.api#documentation": "Specifies object key name filtering rules. For information about key name filtering, see\n Configuring event\n notifications using object key name filtering in the\n Amazon S3 User Guide.
" } }, "com.amazonaws.s3#NotificationId": { @@ -25684,7 +25761,7 @@ "RestoreStatus": { "target": "com.amazonaws.s3#RestoreStatus", "traits": { - "smithy.api#documentation": "Specifies the restoration status of an object. Objects in certain storage classes must be restored\n before they can be retrieved. For more information about these storage classes and how to work with\n archived objects, see \n Working with archived objects in the Amazon S3 User Guide.
" + "smithy.api#documentation": "Specifies the restoration status of an object. Objects in certain storage classes must\n be restored before they can be retrieved. For more information about these storage classes\n and how to work with archived objects, see Working with archived\n objects in the Amazon S3 User Guide.
" } } }, @@ -26200,7 +26277,7 @@ "RestoreStatus": { "target": "com.amazonaws.s3#RestoreStatus", "traits": { - "smithy.api#documentation": "Specifies the restoration status of an object. Objects in certain storage classes must be restored\n before they can be retrieved. For more information about these storage classes and how to work with\n archived objects, see \n Working with archived objects in the Amazon S3 User Guide.
" + "smithy.api#documentation": "Specifies the restoration status of an object. Objects in certain storage classes must\n be restored before they can be retrieved. For more information about these storage classes\n and how to work with archived objects, see Working with archived\n objects in the Amazon S3 User Guide.
" } } }, @@ -26695,7 +26772,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "Sets the permissions on an existing bucket using access control lists (ACL). For more\n information, see Using ACLs. To set the ACL of a\n bucket, you must have WRITE_ACP
permission.
You can use one of the following two ways to set a bucket's permissions:
\nSpecify the ACL in the request body
\nSpecify permissions using request headers
\nYou cannot specify access permission using both the body and the request\n headers.
\nDepending on your application needs, you may choose to set the ACL on a bucket using\n either the request body or the headers. For example, if you have an existing application\n that updates a bucket ACL using the request body, then you can continue to use that\n approach.
\nIf your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs\n are disabled and no longer affect permissions. You must use policies to grant access to\n your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return\n the AccessControlListNotSupported
error code. Requests to read ACLs are\n still supported. For more information, see Controlling object\n ownership in the Amazon S3 User Guide.
You can set access permissions by using one of the following methods:
\nSpecify a canned ACL with the x-amz-acl
request header. Amazon S3 supports\n a set of predefined ACLs, known as canned ACLs. Each canned ACL\n has a predefined set of grantees and permissions. Specify the canned ACL name as the\n value of x-amz-acl
. If you use this header, you cannot use other access\n control-specific headers in your request. For more information, see Canned\n ACL.
Specify access permissions explicitly with the x-amz-grant-read
,\n x-amz-grant-read-acp
, x-amz-grant-write-acp
, and\n x-amz-grant-full-control
headers. When using these headers, you\n specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who\n will receive the permission. If you use these ACL-specific headers, you cannot use\n the x-amz-acl
header to set a canned ACL. These parameters map to the\n set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control\n List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the\n following:
\n\n id
– if the value specified is the canonical user ID of an\n Amazon Web Services account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email address of\n an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nFor example, the following x-amz-grant-write
header grants create,\n overwrite, and delete objects permission to LogDelivery group predefined by Amazon S3 and\n two Amazon Web Services accounts identified by their email addresses.
\n x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\",\n id=\"111122223333\", id=\"555566667777\"
\n
You can use either a canned ACL or specify access permissions explicitly. You cannot do\n both.
\nYou can specify the person (grantee) to whom you're assigning access rights (using\n request elements) in the following ways:
\nBy the person's ID:
\n\n
\n
DisplayName is optional and ignored in the request
\nBy URI:
\n\n
\n
By Email address:
\n\n
\n
The grantee is resolved to the CanonicalUser and, in a response to a GET Object\n acl request, appears as the CanonicalUser.
\nUsing email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nThe following operations are related to PutBucketAcl
:
\n CreateBucket\n
\n\n DeleteBucket\n
\n\n GetObjectAcl\n
\nSets the permissions on an existing bucket using access control lists (ACL). For more\n information, see Using ACLs. To set the ACL of a\n bucket, you must have WRITE_ACP
permission.
You can use one of the following two ways to set a bucket's permissions:
\nSpecify the ACL in the request body
\nSpecify permissions using request headers
\nYou cannot specify access permission using both the body and the request\n headers.
\nDepending on your application needs, you may choose to set the ACL on a bucket using\n either the request body or the headers. For example, if you have an existing application\n that updates a bucket ACL using the request body, then you can continue to use that\n approach.
\nIf your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs\n are disabled and no longer affect permissions. You must use policies to grant access to\n your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return\n the AccessControlListNotSupported
error code. Requests to read ACLs are\n still supported. For more information, see Controlling object\n ownership in the Amazon S3 User Guide.
You can set access permissions by using one of the following methods:
\nSpecify a canned ACL with the x-amz-acl
request header. Amazon S3\n supports a set of predefined ACLs, known as canned\n ACLs. Each canned ACL has a predefined set of grantees and\n permissions. Specify the canned ACL name as the value of\n x-amz-acl
. If you use this header, you cannot use other\n access control-specific headers in your request. For more information, see\n Canned\n ACL.
Specify access permissions explicitly with the\n x-amz-grant-read
, x-amz-grant-read-acp
,\n x-amz-grant-write-acp
, and\n x-amz-grant-full-control
headers. When using these headers,\n you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3\n groups) who will receive the permission. If you use these ACL-specific\n headers, you cannot use the x-amz-acl
header to set a canned\n ACL. These parameters map to the set of permissions that Amazon S3 supports in an\n ACL. For more information, see Access Control List (ACL)\n Overview.
You specify each grantee as a type=value pair, where the type is one of\n the following:
\n\n id
– if the value specified is the canonical user ID\n of an Amazon Web Services account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email\n address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nFor example, the following x-amz-grant-write
header grants\n create, overwrite, and delete objects permission to LogDelivery group\n predefined by Amazon S3 and two Amazon Web Services accounts identified by their\n canonical user IDs.</p>
\n x-amz-grant-write:\n uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\", id=\"111122223333\",\n id=\"555566667777\"
\n
You can use either a canned ACL or specify access permissions explicitly. You\n cannot do both.
\nYou can specify the person (grantee) to whom you're assigning access rights\n (using request elements) in the following ways:
\nBy the person's ID:
\n\n
\n
DisplayName is optional and ignored in the request
\nBy URI:
\n\n
\n
By Email address:
\n\n
\n
The grantee is resolved to the CanonicalUser and, in a response to a GET\n Object acl request, appears as the CanonicalUser.
\nUsing email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nThe following operations are related to PutBucketAcl
:
\n CreateBucket\n
\n\n DeleteBucket\n
\n\n GetObjectAcl\n
\nSets an analytics configuration for the bucket (specified by the analytics configuration\n ID). You can have up to 1,000 analytics configurations per bucket.
\nYou can choose to have storage class analysis export analysis reports sent to a\n comma-separated values (CSV) flat file. See the DataExport
request element.\n Reports are updated daily and are based on the object filters that you configure. When\n selecting data export, you specify a destination bucket and an optional destination prefix\n where the file is written. You can export the data to a destination bucket in a different\n account. However, the destination bucket must be in the same Region as the bucket that you\n are making the PUT analytics configuration to. For more information, see Amazon S3\n Analytics – Storage Class Analysis.
You must create a bucket policy on the destination bucket where the exported file is\n written to grant permissions to Amazon S3 to write objects to the bucket. For an example\n policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
\nTo use this operation, you must have permissions to perform the\n s3:PutAnalyticsConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.
\n PutBucketAnalyticsConfiguration
has the following special errors:
\n HTTP Error: HTTP 400 Bad Request\n
\n\n Code: InvalidArgument\n
\n\n Cause: Invalid argument.\n
\n\n HTTP Error: HTTP 400 Bad Request\n
\n\n Code: TooManyConfigurations\n
\n\n Cause: You are attempting to create a new configuration but have\n already reached the 1,000-configuration limit.\n
\n\n HTTP Error: HTTP 403 Forbidden\n
\n\n Code: AccessDenied\n
\n\n Cause: You are not the owner of the specified bucket, or you do\n not have the s3:PutAnalyticsConfiguration bucket permission to set the\n configuration on the bucket.\n
\nThe following operations are related to PutBucketAnalyticsConfiguration
:
Sets an analytics configuration for the bucket (specified by the analytics configuration\n ID). You can have up to 1,000 analytics configurations per bucket.
\nYou can choose to have storage class analysis export analysis reports sent to a\n comma-separated values (CSV) flat file. See the DataExport
request element.\n Reports are updated daily and are based on the object filters that you configure. When\n selecting data export, you specify a destination bucket and an optional destination prefix\n where the file is written. You can export the data to a destination bucket in a different\n account. However, the destination bucket must be in the same Region as the bucket that you\n are making the PUT analytics configuration to. For more information, see Amazon S3\n Analytics – Storage Class Analysis.
You must create a bucket policy on the destination bucket where the exported file is\n written to grant permissions to Amazon S3 to write objects to the bucket. For an example\n policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
\nTo use this operation, you must have permissions to perform the\n s3:PutAnalyticsConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.
\n PutBucketAnalyticsConfiguration
has the following special errors:
\n HTTP Error: HTTP 400 Bad Request\n
\n\n Code: InvalidArgument\n
\n\n Cause: Invalid argument.\n
\n\n HTTP Error: HTTP 400 Bad Request\n
\n\n Code: TooManyConfigurations\n
\n\n Cause: You are attempting to create a new configuration but have\n already reached the 1,000-configuration limit.\n
\n\n HTTP Error: HTTP 403 Forbidden\n
\n\n Code: AccessDenied\n
\n\n Cause: You are not the owner of the specified bucket, or you do\n not have the s3:PutAnalyticsConfiguration bucket permission to set the\n configuration on the bucket.\n
\nThe following operations are related to\n PutBucketAnalyticsConfiguration
:
This action uses the encryption
subresource to configure default encryption\n and Amazon S3 Bucket Keys for an existing bucket.
By default, all buckets have a default encryption configuration that uses server-side\n encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption\n for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS),\n dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side\n encryption with customer-provided keys (SSE-C). If you specify default encryption by using\n SSE-KMS, you can also configure Amazon S3 Bucket Keys. For information about bucket default\n encryption, see Amazon S3 bucket default encryption\n in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see\n Amazon S3 Bucket\n Keys in the Amazon S3 User Guide.
\nThis action requires Amazon Web Services Signature Version 4. For more information, see \n Authenticating Requests (Amazon Web Services Signature Version 4).
\nTo use this operation, you must have permission to perform the\n s3:PutEncryptionConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.
The following operations are related to PutBucketEncryption
:
\n GetBucketEncryption\n
\nThis action uses the encryption
subresource to configure default encryption\n and Amazon S3 Bucket Keys for an existing bucket.
By default, all buckets have a default encryption configuration that uses server-side\n encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption\n for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or\n dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using\n SSE-KMS, you can also configure Amazon S3 Bucket\n Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests.
\nThis action requires Amazon Web Services Signature Version 4. For more information, see \n Authenticating Requests (Amazon Web Services Signature Version 4).
\nTo use this operation, you must have permission to perform the\n s3:PutEncryptionConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.
The following operations are related to PutBucketEncryption
:
\n GetBucketEncryption\n
\nPuts a S3 Intelligent-Tiering configuration to the specified bucket. You can have up to\n 1,000 S3 Intelligent-Tiering configurations per bucket.
\nThe S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.
\nThe S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
\nFor more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
\nOperations related to PutBucketIntelligentTieringConfiguration
include:
You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically\n move objects stored in the S3 Intelligent-Tiering storage class to the Archive Access\n or Deep Archive Access tier.
\n\n PutBucketIntelligentTieringConfiguration
has the following special errors:
\n Code: InvalidArgument
\n\n Cause: Invalid Argument
\n\n Code: TooManyConfigurations
\n\n Cause: You are attempting to create a new configuration\n but have already reached the 1,000-configuration limit.
\n\n Cause: You are not the owner of the specified bucket,\n or you do not have the s3:PutIntelligentTieringConfiguration
\n bucket permission to set the configuration on the bucket.
Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can have up to\n 1,000 S3 Intelligent-Tiering configurations per bucket.
\nThe S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.
\nThe S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
\nFor more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
\nOperations related to PutBucketIntelligentTieringConfiguration
include:
You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically\n move objects stored in the S3 Intelligent-Tiering storage class to the Archive Access\n or Deep Archive Access tier.
\n\n PutBucketIntelligentTieringConfiguration
has the following special\n errors:
\n Code: InvalidArgument
\n\n Cause: Invalid Argument
\n\n Code: TooManyConfigurations
\n\n Cause: You are attempting to create a new configuration\n but have already reached the 1,000-configuration limit.
\n\n Cause: You are not the owner of the specified bucket, or\n you do not have the s3:PutIntelligentTieringConfiguration
bucket\n permission to set the configuration on the bucket.
This implementation of the PUT
action adds an inventory configuration\n (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory\n configurations per bucket.
Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly\n basis, and the results are published to a flat file. The bucket that is inventoried is\n called the source bucket, and the bucket where the inventory flat file\n is stored is called the destination bucket. The\n destination bucket must be in the same Amazon Web Services Region as the\n source bucket.
\nWhen you configure an inventory for a source bucket, you specify\n the destination bucket where you want the inventory to be stored, and\n whether to generate the inventory daily or weekly. You can also configure what object\n metadata to include and whether to inventory all object versions or only current versions.\n For more information, see Amazon S3 Inventory in the\n Amazon S3 User Guide.
\nYou must create a bucket policy on the destination bucket to\n grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an\n example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
\nTo use this operation, you must have permission to perform the\n s3:PutInventoryConfiguration
action. The bucket owner has this permission\n by default and can grant this permission to others.
The s3:PutInventoryConfiguration
permission allows a user to create an\n S3\n Inventory report that includes all object metadata fields available and to\n specify the destination bucket to store the inventory. A user with read access to objects\n in the destination bucket can also access all object metadata fields that are available in\n the inventory report.
To restrict access to an inventory report, see Restricting access to an Amazon S3 Inventory report in the\n Amazon S3 User Guide. For more information about the metadata fields\n available in S3 Inventory, see Amazon S3\n Inventory lists in the Amazon S3 User Guide. For more\n information about permissions, see Permissions related to bucket subresource operations and Identity and\n access management in Amazon S3 in the Amazon S3 User Guide.
\n\n PutBucketInventoryConfiguration
has the following special errors:
\n Code: InvalidArgument
\n\n Cause: Invalid Argument
\n\n Code: TooManyConfigurations
\n\n Cause: You are attempting to create a new configuration\n but have already reached the 1,000-configuration limit.
\n\n Cause: You are not the owner of the specified bucket,\n or you do not have the s3:PutInventoryConfiguration
bucket\n permission to set the configuration on the bucket.
The following operations are related to PutBucketInventoryConfiguration
:
This implementation of the PUT
action adds an inventory configuration\n (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory\n configurations per bucket.
Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly\n basis, and the results are published to a flat file. The bucket that is inventoried is\n called the source bucket, and the bucket where the inventory flat file\n is stored is called the destination bucket. The\n destination bucket must be in the same Amazon Web Services Region as the\n source bucket.
\nWhen you configure an inventory for a source bucket, you specify\n the destination bucket where you want the inventory to be stored, and\n whether to generate the inventory daily or weekly. You can also configure what object\n metadata to include and whether to inventory all object versions or only current versions.\n For more information, see Amazon S3 Inventory in the\n Amazon S3 User Guide.
\nYou must create a bucket policy on the destination bucket to\n grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an\n example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
\nTo use this operation, you must have permission to perform the\n s3:PutInventoryConfiguration
action. The bucket owner has this\n permission by default and can grant this permission to others.
The s3:PutInventoryConfiguration
permission allows a user to\n create an S3 Inventory\n report that includes all object metadata fields available and to specify the\n destination bucket to store the inventory. A user with read access to objects in\n the destination bucket can also access all object metadata fields that are\n available in the inventory report.
To restrict access to an inventory report, see Restricting access to an Amazon S3 Inventory report in the\n Amazon S3 User Guide. For more information about the metadata\n fields available in S3 Inventory, see Amazon S3 Inventory lists in the Amazon S3 User Guide. For\n more information about permissions, see Permissions related to bucket subresource operations and Identity and access management in Amazon S3 in the\n Amazon S3 User Guide.
\n\n PutBucketInventoryConfiguration
has the following special errors:
\n Code: InvalidArgument
\n\n Cause: Invalid Argument
\n\n Code: TooManyConfigurations
\n\n Cause: You are attempting to create a new configuration\n but have already reached the 1,000-configuration limit.
\n\n Cause: You are not the owner of the specified bucket, or\n you do not have the s3:PutInventoryConfiguration
bucket permission to\n set the configuration on the bucket.
The following operations are related to\n PutBucketInventoryConfiguration
:
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle\n configuration. Keep in mind that this will overwrite an existing lifecycle configuration,\n so if you want to retain any configuration details, they must be included in the new\n lifecycle configuration. For information about lifecycle configuration, see Managing\n your storage lifecycle.
\nBucket lifecycle configuration now supports specifying a lifecycle rule using an\n object key name prefix, one or more object tags, or a combination of both. Accordingly,\n this section describes the latest API. The previous version of the API supported\n filtering based only on an object key name prefix, which is supported for backward\n compatibility. For the related API description, see PutBucketLifecycle.
\nYou specify the lifecycle configuration in your request body. The lifecycle\n configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle\n configuration can have up to 1,000 rules. This limit is not adjustable. Each rule consists\n of the following:
\nA filter identifying a subset of objects to which the rule applies. The filter can\n be based on a key name prefix, object tags, or a combination of both.
\nA status indicating whether the rule is in effect.
\nOne or more lifecycle transition and expiration actions that you want Amazon S3 to\n perform on the objects identified by the filter. If the state of your bucket is\n versioning-enabled or versioning-suspended, you can have many versions of the same\n object (one current version and zero or more noncurrent versions). Amazon S3 provides\n predefined actions that you can specify for current and noncurrent object\n versions.
\nFor more information, see Object Lifecycle Management\n and Lifecycle Configuration Elements.
\nBy default, all Amazon S3 resources are private, including buckets, objects, and related\n subresources (for example, lifecycle configuration and website configuration). Only the\n resource owner (that is, the Amazon Web Services account that created it) can access the resource. The\n resource owner can optionally grant access permissions to others by writing an access\n policy. For this operation, a user must get the s3:PutLifecycleConfiguration
\n permission.
You can also explicitly deny permissions. An explicit deny also supersedes any other\n permissions. If you want to block users or accounts from removing or deleting objects from\n your bucket, you must deny them permissions for the following actions:
\n\n s3:DeleteObject
\n
\n s3:DeleteObjectVersion
\n
\n s3:PutLifecycleConfiguration
\n
For more information about permissions, see Managing Access Permissions to\n Your Amazon S3 Resources.
\nThe following operations are related to PutBucketLifecycleConfiguration
:
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle\n configuration. Keep in mind that this will overwrite an existing lifecycle configuration,\n so if you want to retain any configuration details, they must be included in the new\n lifecycle configuration. For information about lifecycle configuration, see Managing\n your storage lifecycle.
\nBucket lifecycle configuration now supports specifying a lifecycle rule using an\n object key name prefix, one or more object tags, or a combination of both. Accordingly,\n this section describes the latest API. The previous version of the API supported\n filtering based only on an object key name prefix, which is supported for backward\n compatibility. For the related API description, see PutBucketLifecycle.
\nYou specify the lifecycle configuration in your request body. The lifecycle\n configuration is specified as XML consisting of one or more rules. An Amazon S3\n Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable.\n Each rule consists of the following:
\nA filter identifying a subset of objects to which the rule applies. The\n filter can be based on a key name prefix, object tags, or a combination of\n both.
\nA status indicating whether the rule is in effect.
\nOne or more lifecycle transition and expiration actions that you want\n Amazon S3 to perform on the objects identified by the filter. If the state of\n your bucket is versioning-enabled or versioning-suspended, you can have many\n versions of the same object (one current version and zero or more noncurrent\n versions). Amazon S3 provides predefined actions that you can specify for current\n and noncurrent object versions.
\nFor more information, see Object Lifecycle\n Management and Lifecycle Configuration\n Elements.
\nBy default, all Amazon S3 resources are private, including buckets, objects, and\n related subresources (for example, lifecycle configuration and website\n configuration). Only the resource owner (that is, the Amazon Web Services account that created\n it) can access the resource. The resource owner can optionally grant access\n permissions to others by writing an access policy. For this operation, a user must\n get the s3:PutLifecycleConfiguration
permission.
You can also explicitly deny permissions. An explicit deny also supersedes any\n other permissions. If you want to block users or accounts from removing or\n deleting objects from your bucket, you must deny them permissions for the\n following actions:
\n\n s3:DeleteObject
\n
\n s3:DeleteObjectVersion
\n
\n s3:PutLifecycleConfiguration
\n
For more information about permissions, see Managing Access\n Permissions to Your Amazon S3 Resources.
\nThe following operations are related to\n PutBucketLifecycleConfiguration
:
Set the logging parameters for a bucket and to specify permissions for who can view and\n modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as\n the source bucket. To set the logging status of a bucket, you must be the bucket\n owner.
\nThe bucket owner is automatically granted FULL_CONTROL to all logs. You use the\n Grantee
request element to grant access to other people. The\n Permissions
request element specifies the kind of access the grantee has to\n the logs.
If the target bucket for log delivery uses the bucket owner enforced setting for S3\n Object Ownership, you can't use the Grantee
request element to grant access\n to others. Permissions can only be granted using policies. For more information, see\n Permissions for server access log delivery in the\n Amazon S3 User Guide.
You can specify the person (grantee) to whom you're assigning access rights (by using\n request elements) in the following ways:
\nBy the person's ID:
\n\n
\n
\n DisplayName
is optional and ignored in the request.
By Email address:
\n\n
\n
The grantee is resolved to the CanonicalUser
and, in a response to a GETObjectAcl
\n request, appears as the CanonicalUser.
By URI:
\n\n
\n
To enable logging, you use LoggingEnabled
and its children request elements. To disable\n logging, you use an empty BucketLoggingStatus
request element:
\n
\n
For more information about server access logging, see Server Access Logging in the\n Amazon S3 User Guide.
\nFor more information about creating a bucket, see CreateBucket. For more\n information about returning the logging status of a bucket, see GetBucketLogging.
\nThe following operations are related to PutBucketLogging
:
\n PutObject\n
\n\n DeleteBucket\n
\n\n CreateBucket\n
\n\n GetBucketLogging\n
\nSet the logging parameters for a bucket and to specify permissions for who can view and\n modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as\n the source bucket. To set the logging status of a bucket, you must be the bucket\n owner.
\nThe bucket owner is automatically granted FULL_CONTROL to all logs. You use the\n Grantee
request element to grant access to other people. The\n Permissions
request element specifies the kind of access the grantee has to\n the logs.
If the target bucket for log delivery uses the bucket owner enforced setting for S3\n Object Ownership, you can't use the Grantee
request element to grant access\n to others. Permissions can only be granted using policies. For more information, see\n Permissions for server access log delivery in the\n Amazon S3 User Guide.
You can specify the person (grantee) to whom you're assigning access rights (by\n using request elements) in the following ways:
\nBy the person's ID:
\n\n
\n
\n DisplayName
is optional and ignored in the request.
By Email address:
\n\n
\n
The grantee is resolved to the CanonicalUser
and, in a\n response to a GETObjectAcl
request, appears as the\n CanonicalUser.
By URI:
\n\n
\n
To enable logging, you use LoggingEnabled
and its children request\n elements. To disable logging, you use an empty BucketLoggingStatus
request\n element:
\n
\n
For more information about server access logging, see Server Access Logging in the\n Amazon S3 User Guide.
\nFor more information about creating a bucket, see CreateBucket. For more\n information about returning the logging status of a bucket, see GetBucketLogging.
\nThe following operations are related to PutBucketLogging
:
\n PutObject\n
\n\n DeleteBucket\n
\n\n CreateBucket\n
\n\n GetBucketLogging\n
\nApplies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than\n the root user of the Amazon Web Services account that owns the bucket, the calling identity must have the\n PutBucketPolicy
permissions on the specified bucket and belong to the\n bucket owner's account in order to use this operation.
If you don't have PutBucketPolicy
permissions, Amazon S3 returns a 403\n Access Denied
error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own\n buckets, the root principal in a bucket owner's Amazon Web Services account can perform the\n GetBucketPolicy
, PutBucketPolicy
, and\n DeleteBucketPolicy
API actions, even if their bucket policy explicitly\n denies the root principal's access. Bucket owner root principals can only be blocked from performing \n these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
For more information, see Bucket policy\n examples.
\nThe following operations are related to PutBucketPolicy
:
\n CreateBucket\n
\n\n DeleteBucket\n
\nApplies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than\n the root user of the Amazon Web Services account that owns the bucket, the calling identity must have the\n PutBucketPolicy
permissions on the specified bucket and belong to the\n bucket owner's account in order to use this operation.
If you don't have PutBucketPolicy
permissions, Amazon S3 returns a 403\n Access Denied
error. If you have the correct permissions, but you're not using an\n identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not\n Allowed
error.
To ensure that bucket owners don't inadvertently lock themselves out of their own\n buckets, the root principal in a bucket owner's Amazon Web Services account can perform the\n GetBucketPolicy
, PutBucketPolicy
, and\n DeleteBucketPolicy
API actions, even if their bucket policy explicitly\n denies the root principal's access. Bucket owner root principals can only be blocked\n from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations\n policies.
For more information, see Bucket policy\n examples.
\nThe following operations are related to PutBucketPolicy
:
\n CreateBucket\n
\n\n DeleteBucket\n
\nCreates a replication configuration or replaces an existing one. For more information,\n see Replication in the Amazon S3 User Guide.
\nSpecify the replication configuration in the request body. In the replication\n configuration, you provide the name of the destination bucket or buckets where you want\n Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your\n behalf, and other relevant information.
\nA replication configuration must include at least one rule, and can contain a maximum of\n 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in\n the source bucket. To choose additional subsets of objects to replicate, add a rule for\n each subset.
\nTo specify a subset of the objects in the source bucket to apply a replication rule to,\n add the Filter element as a child of the Rule element. You can filter objects based on an\n object key prefix, one or more object tags, or both. When you add the Filter element in the\n configuration, you must also add the following elements:\n DeleteMarkerReplication
, Status
, and\n Priority
.
If you are using an earlier version of the replication configuration, Amazon S3 handles\n replication of delete markers differently. For more information, see Backward Compatibility.
\nFor information about enabling versioning on a bucket, see Using Versioning.
\nBy default, Amazon S3 doesn't replicate objects that are stored at rest using server-side\n encryption with KMS keys. To replicate Amazon Web Services KMS-encrypted objects, add the following:\n SourceSelectionCriteria
, SseKmsEncryptedObjects
,\n Status
, EncryptionConfiguration
, and\n ReplicaKmsKeyID
. For information about replication configuration, see\n Replicating Objects\n Created with SSE Using KMS keys.
For information on PutBucketReplication
errors, see List of\n replication-related error codes\n
To create a PutBucketReplication
request, you must have\n s3:PutReplicationConfiguration
permissions for the bucket.\n \n
By default, a resource owner, in this case the Amazon Web Services account that created the bucket,\n can perform this operation. The resource owner can also grant others permissions to perform\n the operation. For more information about permissions, see Specifying Permissions in a\n Policy and Managing Access Permissions to\n Your Amazon S3 Resources.
\nTo perform this operation, the user or role performing the action must have the\n iam:PassRole permission.
\nThe following operations are related to PutBucketReplication
:
\n GetBucketReplication\n
\nCreates a replication configuration or replaces an existing one. For more information,\n see Replication in the Amazon S3 User Guide.
\nSpecify the replication configuration in the request body. In the replication\n configuration, you provide the name of the destination bucket or buckets where you want\n Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your\n behalf, and other relevant information. You can invoke this request for a specific\n Amazon Web Services Region by using the \n \n aws:RequestedRegion
\n condition key.
A replication configuration must include at least one rule, and can contain a maximum of\n 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in\n the source bucket. To choose additional subsets of objects to replicate, add a rule for\n each subset.
\nTo specify a subset of the objects in the source bucket to apply a replication rule to,\n add the Filter element as a child of the Rule element. You can filter objects based on an\n object key prefix, one or more object tags, or both. When you add the Filter element in the\n configuration, you must also add the following elements:\n DeleteMarkerReplication
, Status
, and\n Priority
.
If you are using an earlier version of the replication configuration, Amazon S3 handles\n replication of delete markers differently. For more information, see Backward Compatibility.
\nFor information about enabling versioning on a bucket, see Using Versioning.
\nBy default, Amazon S3 doesn't replicate objects that are stored at rest using\n server-side encryption with KMS keys. To replicate Amazon Web Services KMS-encrypted objects,\n add the following: SourceSelectionCriteria
,\n SseKmsEncryptedObjects
, Status
,\n EncryptionConfiguration
, and ReplicaKmsKeyID
. For\n information about replication configuration, see Replicating\n Objects Created with SSE Using KMS keys.
For information on PutBucketReplication
errors, see List of\n replication-related error codes\n
To create a PutBucketReplication
request, you must have\n s3:PutReplicationConfiguration
permissions for the bucket.\n \n
By default, a resource owner, in this case the Amazon Web Services account that created the\n bucket, can perform this operation. The resource owner can also grant others\n permissions to perform the operation. For more information about permissions, see\n Specifying Permissions in\n a Policy and Managing Access\n Permissions to Your Amazon S3 Resources.
\nTo perform this operation, the user or role performing the action must have\n the iam:PassRole\n permission.
\nThe following operations are related to PutBucketReplication
:
\n GetBucketReplication\n
\nSets the tags for a bucket.
\nUse tags to organize your Amazon Web Services bill to reflect your own cost structure. To do this,\n sign up to get your Amazon Web Services account bill with tag key values included. Then, to see the cost\n of combined resources, organize your billing information according to resources with the\n same tag key values. For example, you can tag several resources with a specific application\n name, and then organize your billing information to see the total cost of that application\n across several services. For more information, see Cost Allocation and\n Tagging and Using Cost Allocation in Amazon S3 Bucket\n Tags.
\nWhen this operation sets the tags for a bucket, it will overwrite any current tags\n the bucket already has. You cannot use this operation to add tags to an existing list of\n tags.
\nTo use this operation, you must have permissions to perform the\n s3:PutBucketTagging
action. The bucket owner has this permission by default\n and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.
\n PutBucketTagging
has the following special errors:
Error code: InvalidTagError
\n
Description: The tag provided was not a valid tag. This error can occur if\n the tag did not pass input validation. For information about tag restrictions,\n see User-Defined Tag Restrictions and Amazon Web Services-Generated Cost Allocation Tag Restrictions.
\nError code: MalformedXMLError
\n
Description: The XML provided does not match the schema.
\nError code: OperationAbortedError
\n
Description: A conflicting conditional action is currently in progress\n against this resource. Please try again.
\nError code: InternalError
\n
Description: The service was unable to apply the provided tag to the\n bucket.
\nThe following operations are related to PutBucketTagging
:
\n GetBucketTagging\n
\n\n DeleteBucketTagging\n
\nSets the tags for a bucket.
\nUse tags to organize your Amazon Web Services bill to reflect your own cost structure. To do this,\n sign up to get your Amazon Web Services account bill with tag key values included. Then, to see the cost\n of combined resources, organize your billing information according to resources with the\n same tag key values. For example, you can tag several resources with a specific application\n name, and then organize your billing information to see the total cost of that application\n across several services. For more information, see Cost Allocation and\n Tagging and Using Cost Allocation in Amazon S3\n Bucket Tags.
\nWhen this operation sets the tags for a bucket, it will overwrite any current tags\n the bucket already has. You cannot use this operation to add tags to an existing list of\n tags.
\nTo use this operation, you must have permissions to perform the\n s3:PutBucketTagging
action. The bucket owner has this permission by default\n and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.
\n PutBucketTagging
has the following special errors. For more Amazon S3 errors\n see, Error\n Responses.
\n InvalidTag
- The tag provided was not a valid tag. This error\n can occur if the tag did not pass input validation. For more information, see Using\n Cost Allocation in Amazon S3 Bucket Tags.
\n MalformedXML
- The XML provided does not match the\n schema.
\n OperationAborted
- A conflicting conditional action is\n currently in progress against this resource. Please try again.
\n InternalError
- The service was unable to apply the provided\n tag to the bucket.
The following operations are related to PutBucketTagging
:
\n GetBucketTagging\n
\n\n DeleteBucketTagging\n
\nSets the versioning state of an existing bucket.
\nYou can set the versioning state with one of the following values:
\n\n Enabled—Enables versioning for the objects in the\n bucket. All objects added to the bucket receive a unique version ID.
\n\n Suspended—Disables versioning for the objects in the\n bucket. All objects added to the bucket receive the version ID null.
\nIf the versioning state has never been set on a bucket, it has no versioning state; a\n GetBucketVersioning request does not return a versioning state value.
\nIn order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner\n and want to enable MFA Delete in the bucket versioning configuration, you must include the\n x-amz-mfa request
header and the Status
and the\n MfaDelete
request elements in a request to set the versioning state of the\n bucket.
If you have an object expiration lifecycle configuration in your non-versioned bucket and\n you want to maintain the same permanent delete behavior when you enable versioning, you\n must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will\n manage the deletes of the noncurrent object versions in the version-enabled bucket. (A\n version-enabled bucket maintains one current and zero or more noncurrent object\n versions.) For more information, see Lifecycle and Versioning.
\nThe following operations are related to PutBucketVersioning
:
\n CreateBucket\n
\n\n DeleteBucket\n
\n\n GetBucketVersioning\n
\nSets the versioning state of an existing bucket.
\nYou can set the versioning state with one of the following values:
\n\n Enabled—Enables versioning for the objects in the\n bucket. All objects added to the bucket receive a unique version ID.
\n\n Suspended—Disables versioning for the objects in the\n bucket. All objects added to the bucket receive the version ID null.
\nIf the versioning state has never been set on a bucket, it has no versioning state; a\n GetBucketVersioning request does not return a versioning state value.
\nIn order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner\n and want to enable MFA Delete in the bucket versioning configuration, you must include the\n x-amz-mfa request
header and the Status
and the\n MfaDelete
request elements in a request to set the versioning state of the\n bucket.
If you have an object expiration lifecycle configuration in your non-versioned bucket\n and you want to maintain the same permanent delete behavior when you enable versioning,\n you must add a noncurrent expiration policy. The noncurrent expiration lifecycle\n configuration will manage the deletes of the noncurrent object versions in the\n version-enabled bucket. (A version-enabled bucket maintains one current and zero or more\n noncurrent object versions.) For more information, see Lifecycle and Versioning.
\nThe following operations are related to PutBucketVersioning
:
\n CreateBucket\n
\n\n DeleteBucket\n
\n\n GetBucketVersioning\n
\nSets the configuration of the website that is specified in the website
\n subresource. To configure a bucket as a website, you can add this subresource on the bucket\n with website configuration information such as the file name of the index document and any\n redirect rules. For more information, see Hosting Websites on Amazon S3.
This PUT action requires the S3:PutBucketWebsite
permission. By default,\n only the bucket owner can configure the website attached to a bucket; however, bucket\n owners can allow other users to set the website configuration by writing a bucket policy\n that grants them the S3:PutBucketWebsite
permission.
To redirect all website requests sent to the bucket's website endpoint, you add a\n website configuration with the following elements. Because all requests are sent to another\n website, you don't need to provide index document name for the bucket.
\n\n WebsiteConfiguration
\n
\n RedirectAllRequestsTo
\n
\n HostName
\n
\n Protocol
\n
If you want granular control over redirects, you can use the following elements to add\n routing rules that describe conditions for redirecting requests and information about the\n redirect destination. In this case, the website configuration must provide an index\n document for the bucket, because some requests might not be redirected.
\n\n WebsiteConfiguration
\n
\n IndexDocument
\n
\n Suffix
\n
\n ErrorDocument
\n
\n Key
\n
\n RoutingRules
\n
\n RoutingRule
\n
\n Condition
\n
\n HttpErrorCodeReturnedEquals
\n
\n KeyPrefixEquals
\n
\n Redirect
\n
\n Protocol
\n
\n HostName
\n
\n ReplaceKeyPrefixWith
\n
\n ReplaceKeyWith
\n
\n HttpRedirectCode
\n
Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more\n than 50 routing rules, you can use object redirect. For more information, see Configuring an\n Object Redirect in the Amazon S3 User Guide.
", + "smithy.api#documentation": "Sets the configuration of the website that is specified in the website
\n subresource. To configure a bucket as a website, you can add this subresource on the bucket\n with website configuration information such as the file name of the index document and any\n redirect rules. For more information, see Hosting Websites on Amazon S3.
This PUT action requires the S3:PutBucketWebsite
permission. By default,\n only the bucket owner can configure the website attached to a bucket; however, bucket\n owners can allow other users to set the website configuration by writing a bucket policy\n that grants them the S3:PutBucketWebsite
permission.
To redirect all website requests sent to the bucket's website endpoint, you add a\n website configuration with the following elements. Because all requests are sent to another\n website, you don't need to provide index document name for the bucket.
\n\n WebsiteConfiguration
\n
\n RedirectAllRequestsTo
\n
\n HostName
\n
\n Protocol
\n
If you want granular control over redirects, you can use the following elements to add\n routing rules that describe conditions for redirecting requests and information about the\n redirect destination. In this case, the website configuration must provide an index\n document for the bucket, because some requests might not be redirected.
\n\n WebsiteConfiguration
\n
\n IndexDocument
\n
\n Suffix
\n
\n ErrorDocument
\n
\n Key
\n
\n RoutingRules
\n
\n RoutingRule
\n
\n Condition
\n
\n HttpErrorCodeReturnedEquals
\n
\n KeyPrefixEquals
\n
\n Redirect
\n
\n Protocol
\n
\n HostName
\n
\n ReplaceKeyPrefixWith
\n
\n ReplaceKeyWith
\n
\n HttpRedirectCode
\n
Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more\n than 50 routing rules, you can use object redirect. For more information, see Configuring an\n Object Redirect in the Amazon S3 User Guide.
\nThe maximum request length is limited to 128 KB.
", "smithy.api#examples": [ { "title": "Set website configuration on a bucket", @@ -28095,17 +28172,17 @@ "smithy.api#documentation": "Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object\n to it.
\nAmazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the\n entire object to the bucket. You cannot use PutObject
to only update a\n single piece of metadata for an existing object. You must put the entire object with\n updated metadata if you want to update some values.
Amazon S3 is a distributed system. If it receives multiple write requests for the same object\n simultaneously, it overwrites all but the last object written. To prevent objects from\n being deleted or overwritten, you can use Amazon S3 Object\n Lock.
\nTo ensure that data is not corrupted traversing the network, use the\n Content-MD5
header. When you use this header, Amazon S3 checks the object\n against the provided MD5 value and, if they do not match, returns an error. Additionally,\n you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to\n the calculated MD5 value.
To successfully complete the PutObject
request, you must have the\n s3:PutObject
in your IAM permissions.
To successfully change the objects acl of your PutObject
request,\n you must have the s3:PutObjectAcl
in your IAM permissions.
To successfully set the tag-set with your PutObject
request, you\n must have the s3:PutObjectTagging
in your IAM permissions.
The Content-MD5
header is required for any request to upload an\n object with a retention period configured using Amazon S3 Object Lock. For more\n information about Amazon S3 Object Lock, see Amazon S3 Object Lock\n Overview in the Amazon S3 User Guide.
You have four mutually exclusive options to protect data using server-side encryption in\n Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the\n encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or\n DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side\n encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to\n encrypt data at rest by using server-side encryption with other key options. For more\n information, see Using Server-Side\n Encryption.
\nWhen adding a new object, you can use headers to grant ACL-based permissions to\n individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are\n then added to the ACL on the object. By default, all objects are private. Only the owner\n has full access control. For more information, see Access Control List (ACL) Overview\n and Managing\n ACLs Using the REST API.
\nIf the bucket that you're uploading objects to uses the bucket owner enforced setting\n for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that\n use this setting only accept PUT requests that don't specify an ACL or PUT requests that\n specify bucket owner full control ACLs, such as the bucket-owner-full-control
\n canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that\n contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a\n 400
error with the error code AccessControlListNotSupported
.\n For more information, see Controlling ownership of\n objects and disabling ACLs in the Amazon S3 User Guide.
If your bucket uses the bucket owner enforced setting for Object Ownership, all\n objects written to the bucket by any account will be owned by the bucket owner.
\nBy default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The\n STANDARD storage class provides high durability and high availability. Depending on\n performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses\n the OUTPOSTS Storage Class. For more information, see Storage Classes in the\n Amazon S3 User Guide.
\nIf you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID\n for the object being stored. Amazon S3 returns this ID in the response. When you enable\n versioning for a bucket, if Amazon S3 receives multiple write requests for the same object\n simultaneously, it stores all of the objects. For more information about versioning, see\n Adding Objects to\n Versioning-Enabled Buckets. For information about returning the versioning state\n of a bucket, see GetBucketVersioning.
\nFor more information about related Amazon S3 APIs, see the following:
\n\n CopyObject\n
\n\n DeleteObject\n
\nUses the acl
subresource to set the access control list (ACL) permissions\n for a new or existing object in an S3 bucket. You must have WRITE_ACP
\n permission to set the ACL of an object. For more information, see What\n permissions can I grant? in the Amazon S3 User Guide.
This action is not supported by Amazon S3 on Outposts.
\nDepending on your application needs, you can choose to set the ACL on an object using\n either the request body or the headers. For example, if you have an existing application\n that updates a bucket ACL using the request body, you can continue to use that approach.\n For more information, see Access Control List (ACL) Overview\n in the Amazon S3 User Guide.
\nIf your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs\n are disabled and no longer affect permissions. You must use policies to grant access to\n your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return\n the AccessControlListNotSupported
error code. Requests to read ACLs are\n still supported. For more information, see Controlling object\n ownership in the Amazon S3 User Guide.
You can set access permissions using one of the following methods:
\nSpecify a canned ACL with the x-amz-acl
request header. Amazon S3 supports\n a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set\n of grantees and permissions. Specify the canned ACL name as the value of\n x-amz-ac
l. If you use this header, you cannot use other access\n control-specific headers in your request. For more information, see Canned\n ACL.
Specify access permissions explicitly with the x-amz-grant-read
,\n x-amz-grant-read-acp
, x-amz-grant-write-acp
, and\n x-amz-grant-full-control
headers. When using these headers, you\n specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who\n will receive the permission. If you use these ACL-specific headers, you cannot use\n x-amz-acl
header to set a canned ACL. These parameters map to the set\n of permissions that Amazon S3 supports in an ACL. For more information, see Access Control\n List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the\n following:
\n\n id
– if the value specified is the canonical user ID of an\n Amazon Web Services account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email address of\n an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nFor example, the following x-amz-grant-read
header grants list\n objects permission to the two Amazon Web Services accounts identified by their email\n addresses.
\n x-amz-grant-read: emailAddress=\"xyz@amazon.com\",\n emailAddress=\"abc@amazon.com\"
\n
You can use either a canned ACL or specify access permissions explicitly. You cannot do\n both.
\nYou can specify the person (grantee) to whom you're assigning access rights (using\n request elements) in the following ways:
\nBy the person's ID:
\n\n
\n
DisplayName is optional and ignored in the request.
\nBy URI:
\n\n
\n
By Email address:
\n\n
\n
The grantee is resolved to the CanonicalUser and, in a response to a GET Object\n acl request, appears as the CanonicalUser.
\nUsing email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nThe ACL of an object is set at the object version level. By default, PUT sets the ACL of\n the current version of an object. To set the ACL of a different version, use the\n versionId
subresource.
The following operations are related to PutObjectAcl
:
\n CopyObject\n
\n\n GetObject\n
\nUses the acl
subresource to set the access control list (ACL) permissions\n for a new or existing object in an S3 bucket. You must have WRITE_ACP
\n permission to set the ACL of an object. For more information, see What\n permissions can I grant? in the Amazon S3 User Guide.
This action is not supported by Amazon S3 on Outposts.
\nDepending on your application needs, you can choose to set the ACL on an object using\n either the request body or the headers. For example, if you have an existing application\n that updates a bucket ACL using the request body, you can continue to use that approach.\n For more information, see Access Control List (ACL) Overview\n in the Amazon S3 User Guide.
\nIf your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs\n are disabled and no longer affect permissions. You must use policies to grant access to\n your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return\n the AccessControlListNotSupported
error code. Requests to read ACLs are\n still supported. For more information, see Controlling object\n ownership in the Amazon S3 User Guide.
You can set access permissions using one of the following methods:
\nSpecify a canned ACL with the x-amz-acl
request header. Amazon S3\n supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has\n a predefined set of grantees and permissions. Specify the canned ACL name as\n the value of x-amz-ac
l. If you use this header, you cannot use\n other access control-specific headers in your request. For more information,\n see Canned\n ACL.
Specify access permissions explicitly with the\n x-amz-grant-read
, x-amz-grant-read-acp
,\n x-amz-grant-write-acp
, and\n x-amz-grant-full-control
headers. When using these headers,\n you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3\n groups) who will receive the permission. If you use these ACL-specific\n headers, you cannot use x-amz-acl
header to set a canned ACL.\n These parameters map to the set of permissions that Amazon S3 supports in an ACL.\n For more information, see Access Control List (ACL)\n Overview.
You specify each grantee as a type=value pair, where the type is one of\n the following:
\n\n id
– if the value specified is the canonical user ID\n of an Amazon Web Services account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email\n address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nFor example, the following x-amz-grant-read
header grants\n list objects permission to the two Amazon Web Services accounts identified by their email\n addresses.
\n x-amz-grant-read: emailAddress=\"xyz@amazon.com\",\n emailAddress=\"abc@amazon.com\"
\n
You can use either a canned ACL or specify access permissions explicitly. You\n cannot do both.
\nYou can specify the person (grantee) to whom you're assigning access rights\n (using request elements) in the following ways:
\nBy the person's ID:
\n\n
\n
DisplayName is optional and ignored in the request.
\nBy URI:
\n\n
\n
By Email address:
\n\n
\n
The grantee is resolved to the CanonicalUser and, in a response to a GET\n Object acl request, appears as the CanonicalUser.
\nUsing email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nThe ACL of an object is set at the object version level. By default, PUT sets\n the ACL of the current version of an object. To set the ACL of a different\n version, use the versionId
subresource.
The following operations are related to PutObjectAcl
:
\n CopyObject\n
\n\n GetObject\n
\nIf present, specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs. This value is stored as object metadata and automatically gets passed\n on to Amazon Web Services KMS for future GetObject
or CopyObject
operations on\n this object.
If present, specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs. This value is stored as object metadata and automatically gets\n passed on to Amazon Web Services KMS for future GetObject
or CopyObject
\n operations on this object.
If x-amz-server-side-encryption
has a valid value of aws:kms
\n or aws:kms:dsse
, this header specifies the ID of the Key Management Service (KMS)\n symmetric encryption customer managed key that was used for the object. If you specify\n x-amz-server-side-encryption:aws:kms
or\n x-amz-server-side-encryption:aws:kms:dsse
, but do not provide\n x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services managed key\n (aws/s3
) to protect the data. If the KMS key does not exist in the same\n account that's issuing the command, you must use the full ARN and not just the ID.
If x-amz-server-side-encryption
has a valid value of aws:kms
\n or aws:kms:dsse
, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management Service (KMS)\n symmetric encryption customer managed key that was used for the object. If you specify\n x-amz-server-side-encryption:aws:kms
or\n x-amz-server-side-encryption:aws:kms:dsse
, but do not provide\n x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services managed key\n (aws/s3
) to protect the data. If the KMS key does not exist in the same\n account that's issuing the command, you must use the full ARN and not just the ID.
Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a base64-encoded UTF-8 string holding JSON with the encryption context\n key-value pairs. This value is stored as object metadata and automatically gets passed on to\n Amazon Web Services KMS for future GetObject
or CopyObject
operations on this\n object.
Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a base64-encoded UTF-8 string holding JSON with the encryption context\n key-value pairs. This value is stored as object metadata and automatically gets passed on\n to Amazon Web Services KMS for future GetObject
or CopyObject
operations on\n this object.
Sets the supplied tag-set to an object that already exists in a bucket.
\nA tag is a key-value pair. You can associate tags with an object by sending a PUT\n request against the tagging subresource that is associated with the object. You can\n retrieve tags by sending a GET request. For more information, see GetObjectTagging.
\nFor tagging-related restrictions related to characters and encodings, see Tag\n Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per\n object.
\nTo use this operation, you must have permission to perform the\n s3:PutObjectTagging
action. By default, the bucket owner has this\n permission and can grant this permission to others.
To put tags of any other version, use the versionId
query parameter. You\n also need permission for the s3:PutObjectVersionTagging
action.
For information about the Amazon S3 object tagging feature, see Object Tagging.
\n\n PutObjectTagging
has the following special errors:
\n Code: InvalidTagError \n
\n\n Cause: The tag provided was not a valid tag. This error can occur\n if the tag did not pass input validation. For more information, see Object\n Tagging.\n
\n\n Code: MalformedXMLError \n
\n\n Cause: The XML provided does not match the schema.\n
\n\n Code: OperationAbortedError \n
\n\n Cause: A conflicting conditional action is currently in progress\n against this resource. Please try again.\n
\n\n Code: InternalError\n
\n\n Cause: The service was unable to apply the provided tag to the\n object.\n
\nThe following operations are related to PutObjectTagging
:
\n GetObjectTagging\n
\n\n DeleteObjectTagging\n
\nSets the supplied tag-set to an object that already exists in a bucket. A tag is a\n key-value pair. For more information, see Object Tagging.
\nYou can associate tags with an object by sending a PUT request against the tagging\n subresource that is associated with the object. You can retrieve tags by sending a GET\n request. For more information, see GetObjectTagging.
\nFor tagging-related restrictions related to characters and encodings, see Tag\n Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per\n object.
\nTo use this operation, you must have permission to perform the\n s3:PutObjectTagging
action. By default, the bucket owner has this\n permission and can grant this permission to others.
To put tags of any other version, use the versionId
query parameter. You\n also need permission for the s3:PutObjectVersionTagging
action.
\n PutObjectTagging
has the following special errors. For more Amazon S3 errors\n see, Error\n Responses.
\n InvalidTag
- The tag provided was not a valid tag. This error\n can occur if the tag did not pass input validation. For more information, see Object\n Tagging.
\n MalformedXML
- The XML provided does not match the\n schema.
\n OperationAborted
- A conflicting conditional action is\n currently in progress against this resource. Please try again.
\n InternalError
- The service was unable to apply the provided\n tag to the object.
The following operations are related to PutObjectTagging
:
\n GetObjectTagging\n
\n\n DeleteObjectTagging\n
\nCreates or modifies the PublicAccessBlock
configuration for an Amazon S3 bucket.\n To use this operation, you must have the s3:PutBucketPublicAccessBlock
\n permission. For more information about Amazon S3 permissions, see Specifying Permissions in a\n Policy.
When Amazon S3 evaluates the PublicAccessBlock
configuration for a bucket or\n an object, it checks the PublicAccessBlock
configuration for both the\n bucket (or the bucket that contains the object) and the bucket owner's account. If the\n PublicAccessBlock
configurations are different between the bucket and\n the account, Amazon S3 uses the most restrictive combination of the bucket-level and\n account-level settings.
For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".
\nThe following operations are related to PutPublicAccessBlock
:
\n GetPublicAccessBlock\n
\nCreates or modifies the PublicAccessBlock
configuration for an Amazon S3 bucket.\n To use this operation, you must have the s3:PutBucketPublicAccessBlock
\n permission. For more information about Amazon S3 permissions, see Specifying Permissions in a\n Policy.
When Amazon S3 evaluates the PublicAccessBlock
configuration for a bucket or\n an object, it checks the PublicAccessBlock
configuration for both the\n bucket (or the bucket that contains the object) and the bucket owner's account. If the\n PublicAccessBlock
configurations are different between the bucket and\n the account, S3 uses the most restrictive combination of the bucket-level and\n account-level settings.
For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".
\nThe following operations are related to PutPublicAccessBlock
:
\n GetPublicAccessBlock\n
\nConfirms that the requester knows that they will be charged for the request. Bucket\n owners need not specify this parameter in their requests. For information about downloading\n objects from Requester Pays buckets, see Downloading Objects in\n Requester Pays Buckets in the Amazon S3 User Guide.
" + "smithy.api#documentation": "Confirms that the requester knows that they will be charged for the request. Bucket\n owners need not specify this parameter in their requests. If either the source or\n destination Amazon S3 bucket has Requester Pays enabled, the requester will pay for\n corresponding charges to copy the object. For information about downloading objects from\n Requester Pays buckets, see Downloading Objects in\n Requester Pays Buckets in the Amazon S3 User Guide.
" } }, "com.amazonaws.s3#RequestPaymentConfiguration": { @@ -29712,7 +29795,7 @@ "aws.protocols#httpChecksum": { "requestAlgorithmMember": "ChecksumAlgorithm" }, - "smithy.api#documentation": "Restores an archived copy of an object back into Amazon S3
\nThis action is not supported by Amazon S3 on Outposts.
\nThis action performs the following types of requests:
\n\n select
- Perform a select query on an archived object
\n restore an archive
- Restore an archived object
For more information about the S3
structure in the request body, see the\n following:
\n PutObject\n
\n\n Managing Access with ACLs in the\n Amazon S3 User Guide\n
\n\n Protecting Data Using\n Server-Side Encryption in the\n Amazon S3 User Guide\n
\nDefine the SQL expression for the SELECT
type of restoration for your\n query in the request body's SelectParameters
structure. You can use\n expressions like the following examples.
The following expression returns all records from the specified\n object.
\n\n SELECT * FROM Object
\n
Assuming that you are not using any headers for data stored in the object,\n you can specify columns with positional headers.
\n\n SELECT s._1, s._2 FROM Object s WHERE s._3 > 100
\n
If you have headers and you set the fileHeaderInfo
in the\n CSV
structure in the request body to USE
, you can\n specify headers in the query. (If you set the fileHeaderInfo
field\n to IGNORE
, the first row is skipped for the query.) You cannot mix\n ordinal positions with header column names.
\n SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
\n
When making a select request, you can also do the following:
\nTo expedite your queries, specify the Expedited
tier. For more\n information about tiers, see \"Restoring Archives,\" later in this topic.
Specify details about the data serialization format of both the input object that\n is being queried and the serialization of the CSV-encoded query results.
\nThe following are additional important facts about the select feature:
\nThe output results are new Amazon S3 objects. Unlike archive retrievals, they are\n stored until explicitly deleted-manually or through a lifecycle configuration.
\nYou can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't\n duplicate requests, so avoid issuing duplicate requests.
\n Amazon S3 accepts a select request even if the object has already been restored. A\n select request doesn’t return error response 409
.
To use this operation, you must have permissions to perform the\n s3:RestoreObject
action. The bucket owner has this permission by default\n and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.
Objects that you archive to the S3 Glacier Flexible Retrieval Flexible Retrieval or\n S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the\n S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage\n classes, you must first initiate a restore request, and then wait until a temporary copy of\n the object is available. If you want a permanent copy of the object, create a copy of it in\n the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must\n restore the object for the duration (number of days) that you specify. For objects in the\n Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first\n initiate a restore request, and then wait until the object is moved into the Frequent\n Access tier.
\nTo restore a specific object version, you can provide a version ID. If you don't provide\n a version ID, Amazon S3 restores the current version.
\nWhen restoring an archived object, you can specify one of the following data access tier\n options in the Tier
element of the request body:
\n Expedited
- Expedited retrievals allow you to quickly access your\n data stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or\n S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives\n are required. For all but the largest archived objects (250 MB+), data accessed using\n Expedited retrievals is typically made available within 1–5 minutes. Provisioned\n capacity ensures that retrieval capacity for Expedited retrievals is available when\n you need it. Expedited retrievals and provisioned capacity are not available for\n objects stored in the S3 Glacier Deep Archive storage class or\n S3 Intelligent-Tiering Deep Archive tier.
\n Standard
- Standard retrievals allow you to access any of your\n archived objects within several hours. This is the default option for retrieval\n requests that do not specify the retrieval option. Standard retrievals typically\n finish within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval Flexible\n Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within\n 12 hours for objects stored in the S3 Glacier Deep Archive storage class or\n S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in\n S3 Intelligent-Tiering.
\n Bulk
- Bulk retrievals free for objects stored in the S3 Glacier\n Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to\n retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically\n finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval\n Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are\n also the lowest-cost retrieval option when restoring objects from\n S3 Glacier Deep Archive. They typically finish within 48 hours for objects\n stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive\n tier.
For more information about archive retrieval options and provisioned capacity for\n Expedited
data access, see Restoring Archived Objects in\n the Amazon S3 User Guide.
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed\n while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the\n Amazon S3 User Guide.
\nTo get the status of object restoration, you can send a HEAD
request.\n Operations return the x-amz-restore
header, which provides information about\n the restoration status, in the response. You can use Amazon S3 event notifications to notify you\n when a restore is initiated or completed. For more information, see Configuring Amazon S3\n Event Notifications in the Amazon S3 User Guide.
After restoring an archived object, you can update the restoration period by reissuing\n the request with a new period. Amazon S3 updates the restoration period relative to the current\n time and charges only for the request-there are no data transfer charges. You cannot\n update the restoration period when Amazon S3 is actively processing your current restore request\n for the object.
\nIf your bucket has a lifecycle configuration with a rule that includes an expiration\n action, the object expiration overrides the life span that you specify in a restore\n request. For example, if you restore an object copy for 10 days, but the object is\n scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information\n about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management\n in Amazon S3 User Guide.
\nA successful action returns either the 200 OK
or 202 Accepted
\n status code.
If the object is not previously restored, then Amazon S3 returns 202\n Accepted
in the response.
If the object is previously restored, Amazon S3 returns 200 OK
in the\n response.
Special errors:
\n\n Code: RestoreAlreadyInProgress\n
\n\n Cause: Object restore is already in progress. (This error does not\n apply to SELECT type requests.)\n
\n\n HTTP Status Code: 409 Conflict\n
\n\n SOAP Fault Code Prefix: Client\n
\n\n Code: GlacierExpeditedRetrievalNotAvailable\n
\n\n Cause: expedited retrievals are currently not available. Try again\n later. (Returned if there is insufficient capacity to process the Expedited\n request. This error applies only to Expedited retrievals and not to\n S3 Standard or Bulk retrievals.)\n
\n\n HTTP Status Code: 503\n
\n\n SOAP Fault Code Prefix: N/A\n
\nThe following operations are related to RestoreObject
:
Restores an archived copy of an object back into Amazon S3
\nThis action is not supported by Amazon S3 on Outposts.
\nThis action performs the following types of requests:
\n\n select
- Perform a select query on an archived object
\n restore an archive
- Restore an archived object
For more information about the S3
structure in the request body, see the\n following:
\n PutObject\n
\n\n Managing Access with ACLs in the\n Amazon S3 User Guide\n
\n\n Protecting Data Using Server-Side Encryption in the\n Amazon S3 User Guide\n
\nDefine the SQL expression for the SELECT
type of restoration for your query\n in the request body's SelectParameters
structure. You can use expressions like\n the following examples.
The following expression returns all records from the specified object.
\n\n SELECT * FROM Object
\n
Assuming that you are not using any headers for data stored in the object, you can\n specify columns with positional headers.
\n\n SELECT s._1, s._2 FROM Object s WHERE s._3 > 100
\n
If you have headers and you set the fileHeaderInfo
in the\n CSV
structure in the request body to USE
, you can\n specify headers in the query. (If you set the fileHeaderInfo
field to\n IGNORE
, the first row is skipped for the query.) You cannot mix\n ordinal positions with header column names.
\n SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
\n
When making a select request, you can also do the following:
\nTo expedite your queries, specify the Expedited
tier. For more\n information about tiers, see \"Restoring Archives,\" later in this topic.
Specify details about the data serialization format of both the input object that\n is being queried and the serialization of the CSV-encoded query results.
\nThe following are additional important facts about the select feature:
\nThe output results are new Amazon S3 objects. Unlike archive retrievals, they are\n stored until explicitly deleted-manually or through a lifecycle configuration.
\nYou can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't\n duplicate requests, so avoid issuing duplicate requests.
\n Amazon S3 accepts a select request even if the object has already been restored. A\n select request doesn’t return error response 409
.
To use this operation, you must have permissions to perform the\n s3:RestoreObject
action. The bucket owner has this permission by\n default and can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.
Objects that you archive to the S3 Glacier Flexible Retrieval Flexible Retrieval\n or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the\n S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive\n storage classes, you must first initiate a restore request, and then wait until a\n temporary copy of the object is available. If you want a permanent copy of the\n object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket.\n To access an archived object, you must restore the object for the duration (number\n of days) that you specify. For objects in the Archive Access or Deep Archive\n Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request,\n and then wait until the object is moved into the Frequent Access tier.
\nTo restore a specific object version, you can provide a version ID. If you\n don't provide a version ID, Amazon S3 restores the current version.
\nWhen restoring an archived object, you can specify one of the following data\n access tier options in the Tier
element of the request body:
\n Expedited
- Expedited retrievals allow you to quickly access\n your data stored in the S3 Glacier Flexible Retrieval Flexible Retrieval\n storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests\n for restoring archives are required. For all but the largest archived\n objects (250 MB+), data accessed using Expedited retrievals is typically\n made available within 1–5 minutes. Provisioned capacity ensures that\n retrieval capacity for Expedited retrievals is available when you need it.\n Expedited retrievals and provisioned capacity are not available for objects\n stored in the S3 Glacier Deep Archive storage class or\n S3 Intelligent-Tiering Deep Archive tier.
\n Standard
- Standard retrievals allow you to access any of\n your archived objects within several hours. This is the default option for\n retrieval requests that do not specify the retrieval option. Standard\n retrievals typically finish within 3–5 hours for objects stored in the\n S3 Glacier Flexible Retrieval Flexible Retrieval storage class or\n S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for\n objects stored in the S3 Glacier Deep Archive storage class or\n S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored\n in S3 Intelligent-Tiering.
\n Bulk
- Bulk retrievals free for objects stored in the\n S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes,\n enabling you to retrieve large amounts, even petabytes, of data at no cost.\n Bulk retrievals typically finish within 5–12 hours for objects stored in the\n S3 Glacier Flexible Retrieval Flexible Retrieval storage class or\n S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost\n retrieval option when restoring objects from\n S3 Glacier Deep Archive. They typically finish within 48 hours for\n objects stored in the S3 Glacier Deep Archive storage class or\n S3 Intelligent-Tiering Deep Archive tier.
For more information about archive retrieval options and provisioned capacity\n for Expedited
data access, see Restoring Archived\n Objects in the Amazon S3 User Guide.
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster\n speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the\n Amazon S3 User Guide.
\nTo get the status of object restoration, you can send a HEAD
\n request. Operations return the x-amz-restore
header, which provides\n information about the restoration status, in the response. You can use Amazon S3 event\n notifications to notify you when a restore is initiated or completed. For more\n information, see Configuring Amazon S3 Event\n Notifications in the Amazon S3 User Guide.
After restoring an archived object, you can update the restoration period by\n reissuing the request with a new period. Amazon S3 updates the restoration period\n relative to the current time and charges only for the request-there are no\n data transfer charges. You cannot update the restoration period when Amazon S3 is\n actively processing your current restore request for the object.
\nIf your bucket has a lifecycle configuration with a rule that includes an\n expiration action, the object expiration overrides the life span that you specify\n in a restore request. For example, if you restore an object copy for 10 days, but\n the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days.\n For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle\n Management in Amazon S3 User Guide.
\nA successful action returns either the 200 OK
or 202\n Accepted
status code.
If the object is not previously restored, then Amazon S3 returns 202\n Accepted
in the response.
If the object is previously restored, Amazon S3 returns 200 OK
in\n the response.
Special errors:
\n\n Code: RestoreAlreadyInProgress\n
\n\n Cause: Object restore is already in progress. (This error\n does not apply to SELECT type requests.)\n
\n\n HTTP Status Code: 409 Conflict\n
\n\n SOAP Fault Code Prefix: Client\n
\n\n Code: GlacierExpeditedRetrievalNotAvailable\n
\n\n Cause: expedited retrievals are currently not available.\n Try again later. (Returned if there is insufficient capacity to\n process the Expedited request. This error applies only to Expedited\n retrievals and not to S3 Standard or Bulk retrievals.)\n
\n\n HTTP Status Code: 503\n
\n\n SOAP Fault Code Prefix: N/A\n
\nThe following operations are related to RestoreObject
:
Specifies whether the object is currently being restored. If the object restoration is\n in progress, the header returns the value TRUE
. For example:
\n x-amz-optional-object-attributes: IsRestoreInProgress=\"true\"
\n
If the object restoration has completed, the header returns the value FALSE
. For example:
\n x-amz-optional-object-attributes: IsRestoreInProgress=\"false\", RestoreExpiryDate=\"2012-12-21T00:00:00.000Z\"
\n
If the object hasn't been restored, there is no header response.
" + "smithy.api#documentation": "Specifies whether the object is currently being restored. If the object restoration is\n in progress, the header returns the value TRUE
. For example:
\n x-amz-optional-object-attributes: IsRestoreInProgress=\"true\"
\n
If the object restoration has completed, the header returns the value\n FALSE
. For example:
\n x-amz-optional-object-attributes: IsRestoreInProgress=\"false\",\n RestoreExpiryDate=\"2012-12-21T00:00:00.000Z\"
\n
If the object hasn't been restored, there is no header response.
" } }, "RestoreExpiryDate": { "target": "com.amazonaws.s3#RestoreExpiryDate", "traits": { - "smithy.api#documentation": "Indicates when the restored copy will expire. This value is populated only if the object\n has already been restored. For example:
\n\n x-amz-optional-object-attributes: IsRestoreInProgress=\"false\", RestoreExpiryDate=\"2012-12-21T00:00:00.000Z\"
\n
Indicates when the restored copy will expire. This value is populated only if the object\n has already been restored. For example:
\n\n x-amz-optional-object-attributes: IsRestoreInProgress=\"false\",\n RestoreExpiryDate=\"2012-12-21T00:00:00.000Z\"
\n
Specifies the restoration status of an object. Objects in certain storage classes must be restored\n before they can be retrieved. For more information about these storage classes and how to work with\n archived objects, see \n Working with archived objects in the Amazon S3 User Guide.
" + "smithy.api#documentation": "Specifies the restoration status of an object. Objects in certain storage classes must\n be restored before they can be retrieved. For more information about these storage classes\n and how to work with archived objects, see Working with archived\n objects in the Amazon S3 User Guide.
" } }, "com.amazonaws.s3#Role": { @@ -30087,7 +30170,7 @@ "target": "com.amazonaws.s3#SelectObjectContentOutput" }, "traits": { - "smithy.api#documentation": "This action filters the contents of an Amazon S3 object based on a simple structured query\n language (SQL) statement. In the request, along with the SQL expression, you must also\n specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses\n this format to parse object data into records, and returns only records that match the\n specified SQL expression. You must also specify the data serialization format for the\n response.
\nThis action is not supported by Amazon S3 on Outposts.
\nFor more information about Amazon S3 Select, see Selecting Content from\n Objects and SELECT\n Command in the Amazon S3 User Guide.
\n \nYou must have s3:GetObject
permission for this operation. Amazon S3 Select does\n not support anonymous access. For more information about permissions, see Specifying\n Permissions in a Policy in the Amazon S3 User Guide.
You can use Amazon S3 Select to query objects that have the following format\n properties:
\n\n CSV, JSON, and Parquet - Objects must be in CSV, JSON, or\n Parquet format.
\n\n UTF-8 - UTF-8 is the only encoding type Amazon S3 Select\n supports.
\n\n GZIP or BZIP2 - CSV and JSON files can be compressed using\n GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select\n supports for CSV and JSON files. Amazon S3 Select supports columnar compression for\n Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression\n for Parquet objects.
\n\n Server-side encryption - Amazon S3 Select supports querying\n objects that are protected with server-side encryption.
\nFor objects that are encrypted with customer-provided encryption keys (SSE-C), you\n must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side\n Encryption (Using Customer-Provided Encryption Keys) in the\n Amazon S3 User Guide.
\nFor objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon Web Services KMS keys\n (SSE-KMS), server-side encryption is handled transparently, so you don't need to\n specify anything. For more information about server-side encryption, including SSE-S3\n and SSE-KMS, see Protecting Data Using\n Server-Side Encryption in the Amazon S3 User Guide.
\nGiven the response size is unknown, Amazon S3 Select streams the response as a series of\n messages and includes a Transfer-Encoding
header with chunked
as\n its value in the response. For more information, see Appendix: SelectObjectContent\n Response.
The SelectObjectContent
action does not support the following\n GetObject
functionality. For more information, see GetObject.
\n Range
: Although you can specify a scan range for an Amazon S3 Select request\n (see SelectObjectContentRequest - ScanRange in the request parameters),\n you cannot specify the range of bytes of an object to return.
The GLACIER
, DEEP_ARCHIVE
, and REDUCED_REDUNDANCY
storage classes, or the ARCHIVE_ACCESS
and \n DEEP_ARCHIVE_ACCESS
access tiers of \n the INTELLIGENT_TIERING
storage class: You cannot query objects in \n the GLACIER
, DEEP_ARCHIVE
, or REDUCED_REDUNDANCY
storage classes, nor objects in the \n ARCHIVE_ACCESS
or \n DEEP_ARCHIVE_ACCESS
access tiers of \n the INTELLIGENT_TIERING
storage class. For\n more information about storage classes, see Using Amazon S3 storage\n classes in the Amazon S3 User Guide.
For a list of special errors for this operation, see List of\n SELECT Object Content Error Codes\n
\nThe following operations are related to SelectObjectContent
:
\n GetObject\n
\nThis action filters the contents of an Amazon S3 object based on a simple structured query\n language (SQL) statement. In the request, along with the SQL expression, you must also\n specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses\n this format to parse object data into records, and returns only records that match the\n specified SQL expression. You must also specify the data serialization format for the\n response.
\nThis action is not supported by Amazon S3 on Outposts.
\nFor more information about Amazon S3 Select, see Selecting Content from\n Objects and SELECT\n Command in the Amazon S3 User Guide.
\n \nYou must have s3:GetObject
permission for this operation. Amazon S3\n Select does not support anonymous access. For more information about permissions,\n see Specifying Permissions in\n a Policy in the Amazon S3 User Guide.
You can use Amazon S3 Select to query objects that have the following format\n properties:
\n\n CSV, JSON, and Parquet - Objects must be in CSV,\n JSON, or Parquet format.
\n\n UTF-8 - UTF-8 is the only encoding type Amazon S3 Select\n supports.
\n\n GZIP or BZIP2 - CSV and JSON files can be compressed\n using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that\n Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar\n compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support\n whole-object compression for Parquet objects.
\n\n Server-side encryption - Amazon S3 Select supports\n querying objects that are protected with server-side encryption.
\nFor objects that are encrypted with customer-provided encryption keys\n (SSE-C), you must use HTTPS, and you must use the headers that are\n documented in the GetObject. For more\n information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys)\n in the Amazon S3 User Guide.
\nFor objects that are encrypted with Amazon S3 managed keys (SSE-S3) and\n Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently,\n so you don't need to specify anything. For more information about\n server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the\n Amazon S3 User Guide.
\nGiven the response size is unknown, Amazon S3 Select streams the response as a\n series of messages and includes a Transfer-Encoding
header with\n chunked
as its value in the response. For more information, see\n Appendix:\n SelectObjectContent\n Response.
The SelectObjectContent
action does not support the following\n GetObject
functionality. For more information, see GetObject.
\n Range
: Although you can specify a scan range for an Amazon S3 Select\n request (see SelectObjectContentRequest - ScanRange in the request\n parameters), you cannot specify the range of bytes of an object to return.\n
The GLACIER
, DEEP_ARCHIVE
, and\n REDUCED_REDUNDANCY
storage classes, or the\n ARCHIVE_ACCESS
and DEEP_ARCHIVE_ACCESS
access\n tiers of the INTELLIGENT_TIERING
storage class: You cannot\n query objects in the GLACIER
, DEEP_ARCHIVE
, or\n REDUCED_REDUNDANCY
storage classes, nor objects in the\n ARCHIVE_ACCESS
or DEEP_ARCHIVE_ACCESS
access\n tiers of the INTELLIGENT_TIERING
storage class. For more\n information about storage classes, see Using Amazon S3\n storage classes in the\n Amazon S3 User Guide.
For a list of special errors for this operation, see List of SELECT Object Content Error Codes\n
\nThe following operations are related to SelectObjectContent
:
\n GetObject\n
\nAmazon Web Services Key Management Service (KMS) customer Amazon Web Services KMS key ID to use for the default\n encryption. This parameter is allowed if and only if SSEAlgorithm
is set to\n aws:kms
.
You can specify the key ID or the Amazon Resource Name (ARN) of the KMS key. If you use\n a key ID, you can run into a LogDestination undeliverable error when creating a VPC flow\n log.
\nIf you are using encryption with cross-account or Amazon Web Services service operations you must use\n a fully qualified KMS key ARN. For more information, see Using encryption for cross-account operations.
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN:\n arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Amazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in Amazon Web Services KMS in the Amazon Web Services Key Management Service\n Developer Guide.
\nAmazon Web Services Key Management Service (KMS) customer Amazon Web Services KMS key ID to use for the default\n encryption. This parameter is allowed if and only if SSEAlgorithm
is set to\n aws:kms
.
You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the KMS\n key.
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key Alias: alias/alias-name
\n
If you use a key ID, you can run into a LogDestination undeliverable error when creating\n a VPC flow log.
\nIf you are using encryption with cross-account or Amazon Web Services service operations you must use\n a fully qualified KMS key ARN. For more information, see Using encryption for cross-account operations.
\nAmazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in Amazon Web Services KMS in the Amazon Web Services Key Management Service\n Developer Guide.
\nUploads a part by copying data from an existing object as data source. You specify the\n data source by adding the request header x-amz-copy-source
in your request and\n a byte range by adding the request header x-amz-copy-source-range
in your\n request.
For information about maximum and minimum part sizes and other multipart upload\n specifications, see Multipart upload limits in the Amazon S3 User Guide.
\nInstead of using an existing object as part data, you might use the UploadPart\n action and provide data in your request.
\nYou must initiate a multipart upload before you can upload any part. In response to your\n initiate request. Amazon S3 returns a unique identifier, the upload ID, that you must include in\n your upload part request.
\nFor more information about using the UploadPartCopy
operation, see the\n following:
For conceptual information about multipart uploads, see Uploading\n Objects Using Multipart Upload in the\n Amazon S3 User Guide.
\nFor information about permissions required to use the multipart upload API, see\n Multipart Upload and Permissions in the\n Amazon S3 User Guide.
\nFor information about copying objects using a single atomic action vs. a multipart\n upload, see Operations on Objects in\n the Amazon S3 User Guide.
\nFor information about using server-side encryption with customer-provided\n encryption keys with the UploadPartCopy
operation, see CopyObject and UploadPart.
Note the following additional considerations about the request headers\n x-amz-copy-source-if-match
, x-amz-copy-source-if-none-match
,\n x-amz-copy-source-if-unmodified-since
, and\n x-amz-copy-source-if-modified-since
:
\n
\n Consideration 1 - If both of the\n x-amz-copy-source-if-match
and\n x-amz-copy-source-if-unmodified-since
headers are present in the\n request as follows:
\n x-amz-copy-source-if-match
condition evaluates to true
,\n and;
\n x-amz-copy-source-if-unmodified-since
condition evaluates to\n false
;
Amazon S3 returns 200 OK
and copies the data.\n
\n Consideration 2 - If both of the\n x-amz-copy-source-if-none-match
and\n x-amz-copy-source-if-modified-since
headers are present in the\n request as follows:
\n x-amz-copy-source-if-none-match
condition evaluates to\n false
, and;
\n x-amz-copy-source-if-modified-since
condition evaluates to\n true
;
Amazon S3 returns 412 Precondition Failed
response code.\n
If your bucket has versioning enabled, you could have multiple versions of the same\n object. By default, x-amz-copy-source
identifies the current version of the\n object to copy. If the current version is a delete marker and you don't specify a versionId\n in the x-amz-copy-source
, Amazon S3 returns a 404 error, because the object does\n not exist. If you specify versionId in the x-amz-copy-source
and the versionId\n is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify\n a delete marker as a version for the x-amz-copy-source
.
You can optionally specify a specific version of the source object to copy by adding the\n versionId
subresource as shown in the following example:
\n x-amz-copy-source: /bucket/object?versionId=version id
\n
\n Code: NoSuchUpload\n
\n\n Cause: The specified multipart upload does not exist. The upload\n ID might be invalid, or the multipart upload might have been aborted or\n completed.\n
\n\n HTTP Status Code: 404 Not Found\n
\n\n Code: InvalidRequest\n
\n\n Cause: The specified copy source is not supported as a byte-range\n copy source.\n
\n\n HTTP Status Code: 400 Bad Request\n
\nThe following operations are related to UploadPartCopy
:
\n UploadPart\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nUploads a part by copying data from an existing object as data source. You specify the\n data source by adding the request header x-amz-copy-source
in your request and\n a byte range by adding the request header x-amz-copy-source-range
in your\n request.
For information about maximum and minimum part sizes and other multipart upload\n specifications, see Multipart upload limits in the Amazon S3 User Guide.
\nInstead of using an existing object as part data, you might use the UploadPart\n action and provide data in your request.
\nYou must initiate a multipart upload before you can upload any part. In response to your\n initiate request. Amazon S3 returns a unique identifier, the upload ID, that you must include in\n your upload part request.
\nFor more information about using the UploadPartCopy
operation, see the\n following:
For conceptual information about multipart uploads, see Uploading\n Objects Using Multipart Upload in the\n Amazon S3 User Guide.
\nFor information about permissions required to use the multipart upload API, see\n Multipart Upload and Permissions in the\n Amazon S3 User Guide.
\nFor information about copying objects using a single atomic action vs. a multipart\n upload, see Operations on Objects in\n the Amazon S3 User Guide.
\nFor information about using server-side encryption with customer-provided\n encryption keys with the UploadPartCopy
operation, see CopyObject and UploadPart.
Note the following additional considerations about the request headers\n x-amz-copy-source-if-match
, x-amz-copy-source-if-none-match
,\n x-amz-copy-source-if-unmodified-since
, and\n x-amz-copy-source-if-modified-since
:
\n
\n Consideration 1 - If both of the\n x-amz-copy-source-if-match
and\n x-amz-copy-source-if-unmodified-since
headers are present in the\n request as follows:
\n x-amz-copy-source-if-match
condition evaluates to true
,\n and;
\n x-amz-copy-source-if-unmodified-since
condition evaluates to\n false
;
Amazon S3 returns 200 OK
and copies the data.\n
\n Consideration 2 - If both of the\n x-amz-copy-source-if-none-match
and\n x-amz-copy-source-if-modified-since
headers are present in the\n request as follows:
\n x-amz-copy-source-if-none-match
condition evaluates to\n false
, and;
\n x-amz-copy-source-if-modified-since
condition evaluates to\n true
;
Amazon S3 returns 412 Precondition Failed
response code.\n
If your bucket has versioning enabled, you could have multiple versions of the\n same object. By default, x-amz-copy-source
identifies the current\n version of the object to copy. If the current version is a delete marker and you\n don't specify a versionId in the x-amz-copy-source
, Amazon S3 returns a\n 404 error, because the object does not exist. If you specify versionId in the\n x-amz-copy-source
and the versionId is a delete marker, Amazon S3\n returns an HTTP 400 error, because you are not allowed to specify a delete marker\n as a version for the x-amz-copy-source
.
You can optionally specify a specific version of the source object to copy by\n adding the versionId
subresource as shown in the following\n example:
\n x-amz-copy-source: /bucket/object?versionId=version id
\n
\n Code: NoSuchUpload\n
\n\n Cause: The specified multipart upload does not exist. The\n upload ID might be invalid, or the multipart upload might have been\n aborted or completed.\n
\n\n HTTP Status Code: 404 Not Found\n
\n\n Code: InvalidRequest\n
\n\n Cause: The specified copy source is not supported as a\n byte-range copy source.\n
\n\n HTTP Status Code: 400 Bad Request\n
\nThe following operations are related to UploadPartCopy
:
\n UploadPart\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nIf present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n encryption customer managed key that was used for stored in Amazon S3 object.
", + "smithy.api#documentation": "If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n encryption customer managed key that was used for stored in Amazon S3 object.
", "smithy.api#httpHeader": "x-amz-fwd-header-x-amz-server-side-encryption-aws-kms-key-id" } }, diff --git a/codegen/sdk/aws-models/sagemaker.json b/codegen/sdk/aws-models/sagemaker.json index 7e8935f3f9d..0ad31ffeaf2 100644 --- a/codegen/sdk/aws-models/sagemaker.json +++ b/codegen/sdk/aws-models/sagemaker.json @@ -3869,6 +3869,12 @@ "traits": { "smithy.api#documentation": "If specified, monitoring jobs subtract this time from the end time. For information\n about using offsets for scheduling monitoring jobs, see Schedule Model\n Quality Monitoring Jobs.
" } + }, + "ExcludeFeaturesAttribute": { + "target": "com.amazonaws.sagemaker#ExcludeFeaturesAttribute", + "traits": { + "smithy.api#documentation": "The attributes of the input data to exclude from the analysis.
" + } } }, "traits": { @@ -4321,18 +4327,18 @@ "CsvContentTypes": { "target": "com.amazonaws.sagemaker#CsvContentTypes", "traits": { - "smithy.api#documentation": "The list of all content type headers that SageMaker will treat as CSV and capture accordingly.
" + "smithy.api#documentation": "The list of all content type headers that SageMaker will treat as CSV and capture\n accordingly.
" } }, "JsonContentTypes": { "target": "com.amazonaws.sagemaker#JsonContentTypes", "traits": { - "smithy.api#documentation": "The list of all content type headers that SageMaker will treat as JSON and capture accordingly.
" + "smithy.api#documentation": "The list of all content type headers that SageMaker will treat as JSON and capture\n accordingly.
" } } }, "traits": { - "smithy.api#documentation": "Configuration specifying how to treat different headers. If no headers are specified SageMaker \n will by default base64 encode when capturing the data.
" + "smithy.api#documentation": "Configuration specifying how to treat different headers. If no headers are specified\n SageMaker will by default base64 encode when capturing the data.
" } }, "com.amazonaws.sagemaker#CaptureMode": { @@ -6866,7 +6872,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an Autopilot job also referred to as Autopilot experiment or AutoML job.
\nWe recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility.
\n\n CreateAutoMLJobV2
can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob
, as well as time-series forecasting,\n and non-tabular problem types such as image or text classification.
Find guidelines about how to migrate a CreateAutoMLJob
to\n CreateAutoMLJobV2
in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.
You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.
" + "smithy.api#documentation": "Creates an Autopilot job also referred to as Autopilot experiment or AutoML job.
\nWe recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility.
\n\n CreateAutoMLJobV2
can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob
, as well as non-tabular problem types\n such as image or text classification.
Find guidelines about how to migrate a CreateAutoMLJob
to\n CreateAutoMLJobV2
in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.
You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.
" } }, "com.amazonaws.sagemaker#CreateAutoMLJobRequest": { @@ -6974,7 +6980,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2.
\n\n CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob\n and DescribeAutoMLJob which offer backward compatibility.
\n\n CreateAutoMLJobV2
can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob
, as well as time-series forecasting,\n and non-tabular problem types such as image or text classification.
Find guidelines about how to migrate a CreateAutoMLJob
to\n CreateAutoMLJobV2
in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.
For the list of available problem types supported by CreateAutoMLJobV2
, see\n AutoMLProblemTypeConfig.
You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.
" + "smithy.api#documentation": "Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2.
\n\n CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob\n and DescribeAutoMLJob which offer backward compatibility.
\n\n CreateAutoMLJobV2
can manage tabular problem types identical to those of\n its previous version CreateAutoMLJob
, as well as non-tabular problem types\n such as image or text classification.
Find guidelines about how to migrate a CreateAutoMLJob
to\n CreateAutoMLJobV2
in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.
For the list of available problem types supported by CreateAutoMLJobV2
, see\n AutoMLProblemTypeConfig.
You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.
" } }, "com.amazonaws.sagemaker#CreateAutoMLJobV2Request": { @@ -7374,7 +7380,7 @@ "Tags": { "target": "com.amazonaws.sagemaker#TagList", "traits": { - "smithy.api#documentation": "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management\n User Guide.
" + "smithy.api#documentation": "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost\n Management User Guide.
" } } }, @@ -8968,7 +8974,7 @@ "JobDefinitionName": { "target": "com.amazonaws.sagemaker#MonitoringJobDefinitionName", "traits": { - "smithy.api#documentation": "The name of the bias job definition. The name must be unique within an Amazon Web Services Region in the\n Amazon Web Services account.
", + "smithy.api#documentation": "The name of the bias job definition. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account.
", "smithy.api#required": {} } }, @@ -9023,7 +9029,7 @@ "Tags": { "target": "com.amazonaws.sagemaker#TagList", "traits": { - "smithy.api#documentation": "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management\n User Guide.
" + "smithy.api#documentation": "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost\n Management User Guide.
" } } }, @@ -9222,7 +9228,7 @@ "JobDefinitionName": { "target": "com.amazonaws.sagemaker#MonitoringJobDefinitionName", "traits": { - "smithy.api#documentation": "The name of the model explainability job definition. The name must be unique within an\n Amazon Web Services Region in the Amazon Web Services account.
", + "smithy.api#documentation": "The name of the model explainability job definition. The name must be unique within an\n Amazon Web Services Region in the Amazon Web Services account.
", "smithy.api#required": {} } }, @@ -9277,7 +9283,7 @@ "Tags": { "target": "com.amazonaws.sagemaker#TagList", "traits": { - "smithy.api#documentation": "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management\n User Guide.
" + "smithy.api#documentation": "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost\n Management User Guide.
" } } }, @@ -9562,6 +9568,13 @@ "traits": { "smithy.api#documentation": "An array of additional Inference Specification objects. Each additional \n Inference Specification specifies artifacts based on this model package that can \n be used on inference endpoints. Generally used with SageMaker Neo to store the \n compiled artifacts.
" } + }, + "SkipModelValidation": { + "target": "com.amazonaws.sagemaker#SkipModelValidation", + "traits": { + "smithy.api#default": "None", + "smithy.api#documentation": "Indicates if you want to skip model validation.
" + } } }, "traits": { @@ -9664,7 +9677,7 @@ "Tags": { "target": "com.amazonaws.sagemaker#TagList", "traits": { - "smithy.api#documentation": "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management\n User Guide.
" + "smithy.api#documentation": "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost\n Management User Guide.
" } } }, @@ -9713,7 +9726,7 @@ "MonitoringScheduleName": { "target": "com.amazonaws.sagemaker#MonitoringScheduleName", "traits": { - "smithy.api#documentation": "The name of the monitoring schedule. The name must be unique within an Amazon Web Services Region within\n an Amazon Web Services account.
", + "smithy.api#documentation": "The name of the monitoring schedule. The name must be unique within an Amazon Web Services Region within an Amazon Web Services account.
", "smithy.api#required": {} } }, @@ -9727,7 +9740,7 @@ "Tags": { "target": "com.amazonaws.sagemaker#TagList", "traits": { - "smithy.api#documentation": "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management\n User Guide.
" + "smithy.api#documentation": "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost\n Management User Guide.
" } } }, @@ -11351,7 +11364,7 @@ "InitialSamplingPercentage": { "target": "com.amazonaws.sagemaker#SamplingPercentage", "traits": { - "smithy.api#documentation": "The percentage of requests SageMaker will capture. A lower value is recommended for \n Endpoints with high traffic.
", + "smithy.api#documentation": "The percentage of requests SageMaker will capture. A lower value is recommended for Endpoints\n with high traffic.
", "smithy.api#required": {} } }, @@ -11365,20 +11378,20 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker uses to encrypt the\n captured data at rest using Amazon S3 server-side encryption.
\nThe KmsKeyId can be any of the following formats:
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Alias name: alias/ExampleAlias
\n
Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias
\n
The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that\n SageMaker uses to encrypt the captured data at rest using Amazon S3 server-side encryption.
\nThe KmsKeyId can be any of the following formats:
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Alias name: alias/ExampleAlias
\n
Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias
\n
Specifies data Model Monitor will capture. You can configure whether to \n collect only input, only output, or both
", + "smithy.api#documentation": "Specifies data Model Monitor will capture. You can configure whether to collect only\n input, only output, or both
", "smithy.api#required": {} } }, "CaptureContentTypeHeader": { "target": "com.amazonaws.sagemaker#CaptureContentTypeHeader", "traits": { - "smithy.api#documentation": "Configuration specifying how to treat different headers. If no headers are specified SageMaker will \n by default base64 encode when capturing the data.
" + "smithy.api#documentation": "Configuration specifying how to treat different headers. If no headers are specified\n SageMaker will by default base64 encode when capturing the data.
" } } }, @@ -17967,7 +17980,7 @@ "JobDefinitionName": { "target": "com.amazonaws.sagemaker#MonitoringJobDefinitionName", "traits": { - "smithy.api#documentation": "The name of the model bias job definition. The name must be unique within an Amazon Web Services Region\n in the Amazon Web Services account.
", + "smithy.api#documentation": "The name of the model bias job definition. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account.
", "smithy.api#required": {} } } @@ -17989,7 +18002,7 @@ "JobDefinitionName": { "target": "com.amazonaws.sagemaker#MonitoringJobDefinitionName", "traits": { - "smithy.api#documentation": "The name of the bias job definition. The name must be unique within an Amazon Web Services Region in the\n Amazon Web Services account.
", + "smithy.api#documentation": "The name of the bias job definition. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account.
", "smithy.api#required": {} } }, @@ -18041,7 +18054,7 @@ "RoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that\n has read permission to the input data location and write permission to the output data\n location in Amazon S3.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management\n (IAM) role that has read permission to the input data location and write permission to the\n output data location in Amazon S3.
", "smithy.api#required": {} } }, @@ -18302,7 +18315,7 @@ "JobDefinitionName": { "target": "com.amazonaws.sagemaker#MonitoringJobDefinitionName", "traits": { - "smithy.api#documentation": "The name of the model explainability job definition. The name must be unique within an\n Amazon Web Services Region in the Amazon Web Services account.
", + "smithy.api#documentation": "The name of the model explainability job definition. The name must be unique within an\n Amazon Web Services Region in the Amazon Web Services account.
", "smithy.api#required": {} } } @@ -18324,7 +18337,7 @@ "JobDefinitionName": { "target": "com.amazonaws.sagemaker#MonitoringJobDefinitionName", "traits": { - "smithy.api#documentation": "The name of the explainability job definition. The name must be unique within an Amazon Web Services\n Region in the Amazon Web Services account.
", + "smithy.api#documentation": "The name of the explainability job definition. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account.
", "smithy.api#required": {} } }, @@ -18376,7 +18389,7 @@ "RoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) role that\n has read permission to the input data location and write permission to the output data\n location in Amazon S3.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management\n (IAM) role that has read permission to the input data location and write permission to the\n output data location in Amazon S3.
", "smithy.api#required": {} } }, @@ -18727,6 +18740,12 @@ "traits": { "smithy.api#documentation": "An array of additional Inference Specification objects. Each additional \n Inference Specification specifies artifacts based on this model package that can \n be used on inference endpoints. Generally used with SageMaker Neo to store the compiled artifacts.
" } + }, + "SkipModelValidation": { + "target": "com.amazonaws.sagemaker#SkipModelValidation", + "traits": { + "smithy.api#documentation": "Indicates if you want to skip model validation.
" + } } }, "traits": { @@ -18756,7 +18775,7 @@ "JobDefinitionName": { "target": "com.amazonaws.sagemaker#MonitoringJobDefinitionName", "traits": { - "smithy.api#documentation": "The name of the model quality job. The name must be unique within an Amazon Web Services Region in the\n Amazon Web Services account.
", + "smithy.api#documentation": "The name of the model quality job. The name must be unique within an Amazon Web Services\n Region in the Amazon Web Services account.
", "smithy.api#required": {} } } @@ -18778,7 +18797,7 @@ "JobDefinitionName": { "target": "com.amazonaws.sagemaker#MonitoringJobDefinitionName", "traits": { - "smithy.api#documentation": "The name of the quality job definition. The name must be unique within an Amazon Web Services Region in\n the Amazon Web Services account.
", + "smithy.api#documentation": "The name of the quality job definition. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account.
", "smithy.api#required": {} } }, @@ -23022,6 +23041,12 @@ "traits": { "smithy.api#documentation": "If specified, monitoring jobs substract this time from the end time. For information\n about using offsets for scheduling monitoring jobs, see Schedule Model\n Quality Monitoring Jobs.
" } + }, + "ExcludeFeaturesAttribute": { + "target": "com.amazonaws.sagemaker#ExcludeFeaturesAttribute", + "traits": { + "smithy.api#documentation": "The attributes of the input data to exclude from the analysis.
" + } } }, "traits": { @@ -23445,6 +23470,15 @@ "smithy.api#pattern": "^[\\S\\s]*$" } }, + "com.amazonaws.sagemaker#ExcludeFeaturesAttribute": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, "com.amazonaws.sagemaker#ExecutionRoleIdentityConfig": { "type": "enum", "members": { @@ -37902,7 +37936,7 @@ "ConfigUri": { "target": "com.amazonaws.sagemaker#S3Uri", "traits": { - "smithy.api#documentation": "JSON formatted S3 file that defines bias parameters. For more information on this JSON\n configuration file, see Configure bias\n parameters.
", + "smithy.api#documentation": "JSON formatted S3 file that defines bias parameters. For more information on this JSON\n configuration file, see Configure\n bias parameters.
", "smithy.api#required": {} } }, @@ -38904,7 +38938,7 @@ "ConfigUri": { "target": "com.amazonaws.sagemaker#S3Uri", "traits": { - "smithy.api#documentation": "JSON formatted S3 file that defines explainability parameters. For more information on\n this JSON configuration file, see Configure model\n explainability parameters.
", + "smithy.api#documentation": "JSON formatted S3 file that defines explainability parameters. For more information on\n this JSON configuration file, see Configure model explainability parameters.
", "smithy.api#required": {} } }, @@ -39379,6 +39413,12 @@ "traits": { "smithy.api#documentation": "Represents the drift check baselines that can be used when the model monitor is set using the model package.
" } + }, + "SkipModelValidation": { + "target": "com.amazonaws.sagemaker#SkipModelValidation", + "traits": { + "smithy.api#documentation": "Indicates if you want to skip model validation.
" + } } }, "traits": { @@ -40563,7 +40603,7 @@ "VolumeKmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data\n on the storage volume attached to the ML compute instance(s) that run the model monitoring\n job.
" + "smithy.api#documentation": "The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon\n SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s)\n that run the model monitoring job.
" } } }, @@ -40972,7 +41012,7 @@ "target": "com.amazonaws.sagemaker#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Indicates if the file should be read as a json object per line.\n
" + "smithy.api#documentation": "Indicates if the file should be read as a json object per line.
" } } }, @@ -41043,7 +41083,7 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the model\n artifacts at rest using Amazon S3 server-side encryption.
" + "smithy.api#documentation": "The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker\n uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.
" } } }, @@ -41383,7 +41423,7 @@ "target": "com.amazonaws.sagemaker#MonitoringMaxRuntimeInSeconds", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "The maximum runtime allowed in seconds.
\nThe MaxRuntimeInSeconds
cannot exceed the frequency of the job. For data quality and\n model explainability, this can be up to 3600 seconds for an hourly schedule. For model\n bias and model quality hourly schedules, this can be up to 1800 seconds.
The maximum runtime allowed in seconds.
\nThe MaxRuntimeInSeconds
cannot exceed the frequency of the job. For data\n quality and model explainability, this can be up to 3600 seconds for an hourly schedule.\n For model bias and model quality hourly schedules, this can be up to 1800\n seconds.
A cron expression that describes details about the monitoring schedule.
\nCurrently the only supported cron expressions are:
\nIf you want to set the job to start every hour, please use the following:
\n\n Hourly: cron(0 * ? * * *)
\n
If you want to start the job daily:
\n\n cron(0 [00-23] ? * * *)
\n
For example, the following are valid cron expressions:
\nDaily at noon UTC: cron(0 12 ? * * *)
\n
Daily at midnight UTC: cron(0 0 ? * * *)
\n
To support running every 6, 12 hours, the following are also supported:
\n\n cron(0 [00-23]/[01-24] ? * * *)
\n
For example, the following are valid cron expressions:
\nEvery 12 hours, starting at 5pm UTC: cron(0 17/12 ? * * *)
\n
Every two hours starting at midnight: cron(0 0/2 ? * * *)
\n
Even though the cron expression is set to start at 5PM UTC, note that there\n could be a delay of 0-20 minutes from the actual requested time to run the\n execution.
\nWe recommend that if you would like a daily schedule, you do not provide this\n parameter. Amazon SageMaker will pick a time for running every day.
\nA cron expression that describes details about the monitoring schedule.
\nThe supported cron expressions are:
\nIf you want to set the job to start every hour, use the following:
\n\n Hourly: cron(0 * ? * * *)
\n
If you want to start the job daily:
\n\n cron(0 [00-23] ? * * *)
\n
For example, the following are valid cron expressions:
\nDaily at noon UTC: cron(0 12 ? * * *)
\n
Daily at midnight UTC: cron(0 0 ? * * *)
\n
To support running every 6, 12 hours, the following are also supported:
\n\n cron(0 [00-23]/[01-24] ? * * *)
\n
For example, the following are valid cron expressions:
\nEvery 12 hours, starting at 5pm UTC: cron(0 17/12 ? * * *)
\n
Every two hours starting at midnight: cron(0 0/2 ? * * *)
\n
Even though the cron expression is set to start at 5PM UTC, note that there\n could be a delay of 0-20 minutes from the actual requested time to run the\n execution.
\nWe recommend that if you would like a daily schedule, you do not provide this\n parameter. Amazon SageMaker will pick a time for running every day.
\nYou can also specify the keyword NOW
to run the monitoring job immediately, one time,\n without recurring.
Sets the start time for a monitoring job window. Express this time as an offset to the\n times that you schedule your monitoring jobs to run. You schedule monitoring jobs with the\n ScheduleExpression
parameter. Specify this offset in ISO 8601 duration\n format. For example, if you want to monitor the five hours of data in your dataset that\n precede the start of each monitoring job, you would specify: \"-PT5H\"
.
The start time that you specify must not precede the end time that you specify by more\n than 24 hours. You specify the end time with the DataAnalysisEndTime
\n parameter.
If you set ScheduleExpression
to NOW
, this parameter is\n required.
Sets the end time for a monitoring job window. Express this time as an offset to the\n times that you schedule your monitoring jobs to run. You schedule monitoring jobs with the\n ScheduleExpression
parameter. Specify this offset in ISO 8601 duration\n format. For example, if you want to end the window one hour before the start of each\n monitoring job, you would specify: \"-PT1H\"
.
The end time that you specify must not follow the start time that you specify by more\n than 24 hours. You specify the start time with the DataAnalysisStartTime
\n parameter.
If you set ScheduleExpression
to NOW
, this parameter is\n required.
The collection of holiday featurization attributes used to incorporate national holiday\n information into your forecasting model.
" + "smithy.api#documentation": "The collection of holidays featurization attributes used to incorporate national holiday\n information into your forecasting model.
" } } }, @@ -58661,7 +58730,7 @@ "MonitoringScheduleName": { "target": "com.amazonaws.sagemaker#MonitoringScheduleName", "traits": { - "smithy.api#documentation": "The name of the monitoring schedule. The name must be unique within an Amazon Web Services Region within\n an Amazon Web Services account.
", + "smithy.api#documentation": "The name of the monitoring schedule. The name must be unique within an Amazon Web Services Region within an Amazon Web Services account.
", "smithy.api#required": {} } }, diff --git a/codegen/sdk/aws-models/servicediscovery.json b/codegen/sdk/aws-models/servicediscovery.json index 15725cb349e..5ff916fb3a9 100644 --- a/codegen/sdk/aws-models/servicediscovery.json +++ b/codegen/sdk/aws-models/servicediscovery.json @@ -323,7 +323,7 @@ "Name": { "target": "com.amazonaws.servicediscovery#NamespaceNamePublic", "traits": { - "smithy.api#documentation": "The name that you want to assign to this namespace.
\nDo not include sensitive information in the name. The name is publicly available using DNS queries.
\nThe name that you want to assign to this namespace.
\nDo not include sensitive information in the name. The name is publicly available using DNS\n queries.
\nThe name that you want to assign to the service.
\nDo not include sensitive information in the name if the namespace is discoverable by public DNS queries.
\nIf you want Cloud Map to create an SRV
record when you register an instance and you're using a\n system that requires a specific SRV
format, such as HAProxy, specify the following for Name
:
Start the name with an underscore (_), such as _exampleservice
.
End the name with ._protocol, such as ._tcp
.
When you register an instance, Cloud Map creates an SRV
record and assigns a\n name to the record by concatenating the service name and the namespace name (for example,
\n _exampleservice._tcp.example.com
).
For services that are accessible by DNS queries, you can't create multiple services with\n names that differ only by case (such as EXAMPLE and example). Otherwise, these services have the\n same DNS name and can't be distinguished. However, if you use a namespace that's only accessible\n by API calls, then you can create services that with names that differ only by case.
\nThe name that you want to assign to the service.
\nDo not include sensitive information in the name if the namespace is discoverable by public\n DNS queries.
\nIf you want Cloud Map to create an SRV
record when you register an instance\n and you're using a system that requires a specific SRV
format, such as HAProxy, specify the following for\n Name
:
Start the name with an underscore (_), such as _exampleservice
.
End the name with ._protocol, such as ._tcp
.
When you register an instance, Cloud Map creates an SRV
record and assigns a\n name to the record by concatenating the service name and the namespace name (for example,
\n _exampleservice._tcp.example.com
).
For services that are accessible by DNS queries, you can't create multiple services with\n names that differ only by case (such as EXAMPLE and example). Otherwise, these services have the\n same DNS name and can't be distinguished. However, if you use a namespace that's only accessible\n by API calls, then you can create services that with names that differ only by case.
\nDiscovers registered instances for a specified namespace and service. You can use\n DiscoverInstances
to discover instances for any type of namespace. For public and\n private DNS namespaces, you can also use DNS queries to discover instances.
Discovers registered instances for a specified namespace and service. You can use\n DiscoverInstances
to discover instances for any type of namespace.\n DiscoverInstances
returns a randomized list of instances allowing customers to\n distribute traffic evenly across instances. For public and private DNS namespaces, you can also\n use DNS queries to discover instances.
A complex type that contains one HttpInstanceSummary
for each registered\n instance.
The increasing revision associated to the response Instances list. If a new instance is\n registered or deregistered, the InstancesRevision
updates. The health status updates\n don't update InstancesRevision
.
Discovers the increasing revision associated with an instance.
", + "smithy.api#endpoint": { + "hostPrefix": "data-" + } + } + }, + "com.amazonaws.servicediscovery#DiscoverInstancesRevisionRequest": { + "type": "structure", + "members": { + "NamespaceName": { + "target": "com.amazonaws.servicediscovery#NamespaceName", + "traits": { + "smithy.api#documentation": "The HttpName
name of the namespace. It's found in the\n HttpProperties
member of the Properties
member of the\n namespace.
The name of the service that you specified when you registered the instance.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.servicediscovery#DiscoverInstancesRevisionResponse": { + "type": "structure", + "members": { + "InstancesRevision": { + "target": "com.amazonaws.servicediscovery#Revision", + "traits": { + "smithy.api#documentation": "The increasing revision associated to the response Instances list. If a new instance is\n registered or deregistered, the InstancesRevision
updates. The health status updates\n don't update InstancesRevision
.
Gets information about any operation that returns an operation ID in the response, such as a\n CreateService
request.
To get a list of operations that match specified criteria, see ListOperations.
\nGets information about any operation that returns an operation ID in the response, such as a\n CreateHttpNamespace
request.
To get a list of operations that match specified criteria, see ListOperations.
\nA string map that contains the following information for the service that you specify in\n ServiceId
:
The attributes that apply to the records that are defined in the service.
\nFor each attribute, the applicable value.
\nDo not include sensitive information in the attributes if the namespace is discoverable by public DNS\n queries.
\nSupported attribute keys include the following:
\nIf you want Cloud Map to create a Route 53 alias record that routes traffic to an Elastic Load Balancing\n load balancer, specify the DNS name that's associated with the load balancer. For information\n about how to get the DNS name, see AliasTarget->DNSName in the Route 53 API Reference.
\nNote the following:
\nThe configuration for the service that's specified by ServiceId
must\n include settings for an A
record, an AAAA
record, or both.
In the service that's specified by ServiceId
, the value of\n RoutingPolicy
must be WEIGHTED
.
If the service that's specified by ServiceId
includes\n HealthCheckConfig
settings, Cloud Map creates the health check, but it won't\n associate the health check with the alias record.
Auto naming currently doesn't support creating alias records that route traffic to\n Amazon Web Services resources other than ELB load balancers.
\nIf you specify a value for AWS_ALIAS_DNS_NAME
, don't specify values for\n any of the AWS_INSTANCE
attributes.
\n HTTP namespaces only. The Amazon EC2 instance ID for the instance. The\n AWS_INSTANCE_IPV4
attribute contains the primary private IPv4 address.
If the service configuration includes HealthCheckCustomConfig
, you can\n optionally use AWS_INIT_HEALTH_STATUS
to specify the initial status of the custom\n health check, HEALTHY
or UNHEALTHY
. If you don't specify a value for\n AWS_INIT_HEALTH_STATUS
, the initial status is HEALTHY
.
If the service configuration includes a CNAME
record, the domain name that\n you want Route 53 to return in response to DNS queries (for example,\n example.com
).
This value is required if the service specified by ServiceId
includes\n settings for an CNAME
record.
If the service configuration includes an A
record, the IPv4 address that you\n want Route 53 to return in response to DNS queries (for example, 192.0.2.44
).
This value is required if the service specified by ServiceId
includes\n settings for an A
record. If the service includes settings for an\n SRV
record, you must specify a value for AWS_INSTANCE_IPV4
,\n AWS_INSTANCE_IPV6
, or both.
If the service configuration includes an AAAA
record, the IPv6 address that\n you want Route 53 to return in response to DNS queries (for example,\n 2001:0db8:85a3:0000:0000:abcd:0001:2345
).
This value is required if the service specified by ServiceId
includes\n settings for an AAAA
record. If the service includes settings for an\n SRV
record, you must specify a value for AWS_INSTANCE_IPV4
,\n AWS_INSTANCE_IPV6
, or both.
If the service includes an SRV
record, the value that you want Route 53 to\n return for the port.
If the service includes HealthCheckConfig
, the port on the endpoint that you\n want Route 53 to send requests to.
This value is required if you specified settings for an SRV
record or a\n Route 53 health check when you created the service.
A string map that contains the following information for the service that you specify in\n ServiceId
:
The attributes that apply to the records that are defined in the service.
\nFor each attribute, the applicable value.
\nDo not include sensitive information in the attributes if the namespace is discoverable by\n public DNS queries.
\nSupported attribute keys include the following:
\nIf you want Cloud Map to create a Route 53 alias record that routes traffic to an Elastic Load Balancing\n load balancer, specify the DNS name that's associated with the load balancer. For information\n about how to get the DNS name, see AliasTarget->DNSName in the Route 53 API Reference.
\nNote the following:
\nThe configuration for the service that's specified by ServiceId
must\n include settings for an A
record, an AAAA
record, or both.
In the service that's specified by ServiceId
, the value of\n RoutingPolicy
must be WEIGHTED
.
If the service that's specified by ServiceId
includes\n HealthCheckConfig
settings, Cloud Map creates the health check, but it won't\n associate the health check with the alias record.
Auto naming currently doesn't support creating alias records that route traffic to\n Amazon Web Services resources other than ELB load balancers.
\nIf you specify a value for AWS_ALIAS_DNS_NAME
, don't specify values for\n any of the AWS_INSTANCE
attributes.
\n HTTP namespaces only. The Amazon EC2 instance ID for the instance. The\n AWS_INSTANCE_IPV4
attribute contains the primary private IPv4 address.
If the service configuration includes HealthCheckCustomConfig
, you can\n optionally use AWS_INIT_HEALTH_STATUS
to specify the initial status of the custom\n health check, HEALTHY
or UNHEALTHY
. If you don't specify a value for\n AWS_INIT_HEALTH_STATUS
, the initial status is HEALTHY
.
If the service configuration includes a CNAME
record, the domain name that\n you want Route 53 to return in response to DNS queries (for example,\n example.com
).
This value is required if the service specified by ServiceId
includes\n settings for an CNAME
record.
If the service configuration includes an A
record, the IPv4 address that you\n want Route 53 to return in response to DNS queries (for example, 192.0.2.44
).
This value is required if the service specified by ServiceId
includes\n settings for an A
record. If the service includes settings for an\n SRV
record, you must specify a value for AWS_INSTANCE_IPV4
,\n AWS_INSTANCE_IPV6
, or both.
If the service configuration includes an AAAA
record, the IPv6 address that\n you want Route 53 to return in response to DNS queries (for example,\n 2001:0db8:85a3:0000:0000:abcd:0001:2345
).
This value is required if the service specified by ServiceId
includes\n settings for an AAAA
record. If the service includes settings for an\n SRV
record, you must specify a value for AWS_INSTANCE_IPV4
,\n AWS_INSTANCE_IPV6
, or both.
If the service includes an SRV
record, the value that you want Route 53 to\n return for the port.
If the service includes HealthCheckConfig
, the port on the endpoint that you\n want Route 53 to send requests to.
This value is required if you specified settings for an SRV
record or a\n Route 53 health check when you created the service.
Specify the operator that you want to use to determine whether a namespace matches the\n specified value. Valid values for Condition
are one of the following.
\n EQ
: When you specify EQ
for Condition
, you can\n specify only one value. EQ
is supported for TYPE
, NAME
,\n and HTTP_NAME
. EQ
is the default condition and can be\n omitted.
\n BEGINS_WITH
: When you specify BEGINS_WITH
for\n Condition
, you can specify only one value. BEGINS_WITH
is supported\n for TYPE
, NAME
, and HTTP_NAME
.
Specify the operator that you want to use to determine whether a namespace matches the\n specified value. Valid values for Condition
are one of the following.
\n EQ
: When you specify EQ
for Condition
, you can\n specify only one value. EQ
is supported for TYPE
, NAME
,\n and HTTP_NAME
. EQ
is the default condition and can be omitted.
\n BEGINS_WITH
: When you specify BEGINS_WITH
for\n Condition
, you can specify only one value. BEGINS_WITH
is supported\n for TYPE
, NAME
, and HTTP_NAME
.
An identifier that you want to associate with the instance. Note the following:
\nIf the service that's specified by ServiceId
includes settings for an\n SRV
record, the value of InstanceId
is automatically included as\n part of the value for the SRV
record. For more information, see DnsRecord >\n Type.
You can use this value to update an existing instance.
\nTo register a new instance, you must specify a value that's unique among instances that\n you register by using the same service.
\nIf you specify an existing InstanceId
and ServiceId
, Cloud Map\n updates the existing DNS records, if any. If there's also an existing health check, Cloud Map\n deletes the old health check and creates a new one.
The health check isn't deleted immediately, so it will still appear for a while if you\n submit a ListHealthChecks
request, for example.
Do not include sensitive information in InstanceId
if the namespace is discoverable by public DNS\n queries and any Type
member of DnsRecord
for the service contains SRV
because\n the InstanceId
is discoverable by public DNS queries.
An identifier that you want to associate with the instance. Note the following:
\nIf the service that's specified by ServiceId
includes settings for an\n SRV
record, the value of InstanceId
is automatically included as\n part of the value for the SRV
record. For more information, see DnsRecord >\n Type.
You can use this value to update an existing instance.
\nTo register a new instance, you must specify a value that's unique among instances that\n you register by using the same service.
\nIf you specify an existing InstanceId
and ServiceId
, Cloud Map\n updates the existing DNS records, if any. If there's also an existing health check, Cloud Map\n deletes the old health check and creates a new one.
The health check isn't deleted immediately, so it will still appear for a while if you\n submit a ListHealthChecks
request, for example.
Do not include sensitive information in InstanceId
if the namespace is\n discoverable by public DNS queries and any Type
member of DnsRecord
\n for the service contains SRV
because the InstanceId
is discoverable by\n public DNS queries.
A string map that contains the following information for the service that you specify in\n ServiceId
:
The attributes that apply to the records that are defined in the service.
\nFor each attribute, the applicable value.
\nDo not include sensitive information in the attributes if the namespace is discoverable by public DNS\n queries.
\nSupported attribute keys include the following:
\nIf you want Cloud Map to create an Amazon Route 53 alias record that routes traffic to an\n Elastic Load Balancing load balancer, specify the DNS name that's associated with the load balancer. For\n information about how to get the DNS name, see \"DNSName\" in the topic AliasTarget in the Route 53 API Reference.
\nNote the following:
\nThe configuration for the service that's specified by ServiceId
must\n include settings for an A
record, an AAAA
record, or both.
In the service that's specified by ServiceId
, the value of\n RoutingPolicy
must be WEIGHTED
.
If the service that's specified by ServiceId
includes\n HealthCheckConfig
settings, Cloud Map will create the Route 53 health check, but\n it doesn't associate the health check with the alias record.
Auto naming currently doesn't support creating alias records that route traffic to\n Amazon Web Services resources other than Elastic Load Balancing load balancers.
\nIf you specify a value for AWS_ALIAS_DNS_NAME
, don't specify values for\n any of the AWS_INSTANCE
attributes.
\n HTTP namespaces only. The Amazon EC2 instance ID for the instance. If the\n AWS_EC2_INSTANCE_ID
attribute is specified, then the only other attribute that\n can be specified is AWS_INIT_HEALTH_STATUS
. When the\n AWS_EC2_INSTANCE_ID
attribute is specified, then the\n AWS_INSTANCE_IPV4
attribute will be filled out with the primary private IPv4\n address.
If the service configuration includes HealthCheckCustomConfig
, you can\n optionally use AWS_INIT_HEALTH_STATUS
to specify the initial status of the custom\n health check, HEALTHY
or UNHEALTHY
. If you don't specify a value for\n AWS_INIT_HEALTH_STATUS
, the initial status is HEALTHY
.
If the service configuration includes a CNAME
record, the domain name that\n you want Route 53 to return in response to DNS queries (for example,\n example.com
).
This value is required if the service specified by ServiceId
includes\n settings for an CNAME
record.
If the service configuration includes an A
record, the IPv4 address that you\n want Route 53 to return in response to DNS queries (for example, 192.0.2.44
).
This value is required if the service specified by ServiceId
includes\n settings for an A
record. If the service includes settings for an\n SRV
record, you must specify a value for AWS_INSTANCE_IPV4
,\n AWS_INSTANCE_IPV6
, or both.
If the service configuration includes an AAAA
record, the IPv6 address that\n you want Route 53 to return in response to DNS queries (for example,\n 2001:0db8:85a3:0000:0000:abcd:0001:2345
).
This value is required if the service specified by ServiceId
includes\n settings for an AAAA
record. If the service includes settings for an\n SRV
record, you must specify a value for AWS_INSTANCE_IPV4
,\n AWS_INSTANCE_IPV6
, or both.
If the service includes an SRV
record, the value that you want Route 53 to\n return for the port.
If the service includes HealthCheckConfig
, the port on the endpoint that you\n want Route 53 to send requests to.
This value is required if you specified settings for an SRV
record or a\n Route 53 health check when you created the service.
You can add up to 30 custom attributes. For each key-value pair, the maximum length of\n the attribute name is 255 characters, and the maximum length of the attribute value is 1,024\n characters. The total size of all provided attributes (sum of all keys and values) must not\n exceed 5,000 characters.
\nA string map that contains the following information for the service that you specify in\n ServiceId
:
The attributes that apply to the records that are defined in the service.
\nFor each attribute, the applicable value.
\nDo not include sensitive information in the attributes if the namespace is discoverable by\n public DNS queries.
\nSupported attribute keys include the following:
\nIf you want Cloud Map to create an Amazon Route 53 alias record that routes traffic to an\n Elastic Load Balancing load balancer, specify the DNS name that's associated with the load balancer. For\n information about how to get the DNS name, see \"DNSName\" in the topic AliasTarget in the Route 53 API Reference.
\nNote the following:
\nThe configuration for the service that's specified by ServiceId
must\n include settings for an A
record, an AAAA
record, or both.
In the service that's specified by ServiceId
, the value of\n RoutingPolicy
must be WEIGHTED
.
If the service that's specified by ServiceId
includes\n HealthCheckConfig
settings, Cloud Map will create the Route 53 health check, but\n it doesn't associate the health check with the alias record.
Cloud Map currently doesn't support creating alias records that route traffic to\n Amazon Web Services resources other than Elastic Load Balancing load balancers.
\nIf you specify a value for AWS_ALIAS_DNS_NAME
, don't specify values for\n any of the AWS_INSTANCE
attributes.
\n HTTP namespaces only. The Amazon EC2 instance ID for the instance. If the\n AWS_EC2_INSTANCE_ID
attribute is specified, then the only other attribute that\n can be specified is AWS_INIT_HEALTH_STATUS
. When the\n AWS_EC2_INSTANCE_ID
attribute is specified, then the\n AWS_INSTANCE_IPV4
attribute will be filled out with the primary private IPv4\n address.
If the service configuration includes HealthCheckCustomConfig
, you can\n optionally use AWS_INIT_HEALTH_STATUS
to specify the initial status of the custom\n health check, HEALTHY
or UNHEALTHY
. If you don't specify a value for\n AWS_INIT_HEALTH_STATUS
, the initial status is HEALTHY
.
If the service configuration includes a CNAME
record, the domain name that\n you want Route 53 to return in response to DNS queries (for example,\n example.com
).
This value is required if the service specified by ServiceId
includes\n settings for an CNAME
record.
If the service configuration includes an A
record, the IPv4 address that you\n want Route 53 to return in response to DNS queries (for example, 192.0.2.44
).
This value is required if the service specified by ServiceId
includes\n settings for an A
record. If the service includes settings for an\n SRV
record, you must specify a value for AWS_INSTANCE_IPV4
,\n AWS_INSTANCE_IPV6
, or both.
If the service configuration includes an AAAA
record, the IPv6 address that\n you want Route 53 to return in response to DNS queries (for example,\n 2001:0db8:85a3:0000:0000:abcd:0001:2345
).
This value is required if the service specified by ServiceId
includes\n settings for an AAAA
record. If the service includes settings for an\n SRV
record, you must specify a value for AWS_INSTANCE_IPV4
,\n AWS_INSTANCE_IPV6
, or both.
If the service includes an SRV
record, the value that you want Route 53 to\n return for the port.
If the service includes HealthCheckConfig
, the port on the endpoint that you\n want Route 53 to send requests to.
This value is required if you specified settings for an SRV
record or a\n Route 53 health check when you created the service.
You can add up to 30 custom attributes. For each key-value pair, the maximum length of\n the attribute name is 255 characters, and the maximum length of the attribute value is 1,024\n characters. The total size of all provided attributes (sum of all keys and values) must not\n exceed 5,000 characters.
\nAmazon Web Services Systems Manager is the operations hub for your Amazon Web Services applications and resources and a secure\n end-to-end management solution for hybrid cloud environments that enables safe and secure\n operations at scale.
\nThis reference is intended to be used with the Amazon Web Services Systems Manager User Guide. To get started, see Setting up Amazon Web Services Systems Manager.
\n\n Related resources\n
\nFor information about each of the capabilities that comprise Systems Manager, see Systems Manager capabilities in the Amazon Web Services Systems Manager User Guide.
\nFor details about predefined runbooks for Automation, a capability of Amazon Web Services Systems Manager, see the\n \n Systems Manager Automation runbook reference\n .
\nFor information about AppConfig, a capability of Systems Manager, see the \n AppConfig User Guide\n \n and the \n AppConfig\n API Reference\n .
\nFor information about Incident Manager, a capability of Systems Manager, see the \n Systems Manager Incident Manager User\n Guide\n and the \n Systems Manager Incident Manager API\n Reference\n .
\nAmazon Web Services Systems Manager is the operations hub for your Amazon Web Services applications and resources and a secure\n end-to-end management solution for hybrid cloud environments that enables safe and secure\n operations at scale.
\nThis reference is intended to be used with the Amazon Web Services Systems Manager User Guide. To get started, see Setting up Amazon Web Services Systems Manager.
\n\n Related resources\n
\nFor information about each of the capabilities that comprise Systems Manager, see Systems Manager capabilities in the Amazon Web Services Systems Manager User Guide.
\nFor details about predefined runbooks for Automation, a capability of Amazon Web Services Systems Manager, see the\n \n Systems Manager Automation runbook reference\n .
\nFor information about AppConfig, a capability of Systems Manager, see the \n AppConfig User Guide\n \n and the \n AppConfig\n API Reference\n .
\nFor information about Incident Manager, a capability of Systems Manager, see the \n Systems Manager Incident Manager User\n Guide\n and the \n Systems Manager Incident Manager API\n Reference\n .
\nInformation about the OpsItem.
", + "smithy.api#documentation": "User-defined text that contains information about the OpsItem, in Markdown format.\n
\nProvide enough information so that users viewing this OpsItem for the first time \n understand the issue.
\nThe type of OpsItem to create. Systems Manager supports the following types of OpsItems:
\n\n /aws/issue
\n
This type of OpsItem is used for default OpsItems created by OpsCenter.
\n\n /aws/changerequest
\n
This type of OpsItem is used by Change Manager for reviewing and approving or rejecting change\n requests.
\n\n /aws/insights
\n
This type of OpsItem is used by OpsCenter for aggregating and reporting on duplicate\n OpsItems.
\nThe type of OpsItem to create. Systems Manager supports the following types of OpsItems:
\n\n /aws/issue
\n
This type of OpsItem is used for default OpsItems created by OpsCenter.
\n\n /aws/changerequest
\n
This type of OpsItem is used by Change Manager for reviewing and approving or rejecting change\n requests.
\n\n /aws/insight
\n
This type of OpsItem is used by OpsCenter for aggregating and reporting on duplicate\n OpsItems.
\nThe type of OpsItem. Systems Manager supports the following types of OpsItems:
\n\n /aws/issue
\n
This type of OpsItem is used for default OpsItems created by OpsCenter.
\n\n /aws/changerequest
\n
This type of OpsItem is used by Change Manager for reviewing and approving or rejecting change\n requests.
\n\n /aws/insights
\n
This type of OpsItem is used by OpsCenter for aggregating and reporting on duplicate\n OpsItems.
\nThe type of OpsItem. Systems Manager supports the following types of OpsItems:
\n\n /aws/issue
\n
This type of OpsItem is used for default OpsItems created by OpsCenter.
\n\n /aws/changerequest
\n
This type of OpsItem is used by Change Manager for reviewing and approving or rejecting change\n requests.
\n\n /aws/insight
\n
This type of OpsItem is used by OpsCenter for aggregating and reporting on duplicate\n OpsItems.
\nThe type of OpsItem. Systems Manager supports the following types of OpsItems:
\n\n /aws/issue
\n
This type of OpsItem is used for default OpsItems created by OpsCenter.
\n\n /aws/changerequest
\n
This type of OpsItem is used by Change Manager for reviewing and approving or rejecting change\n requests.
\n\n /aws/insights
\n
This type of OpsItem is used by OpsCenter for aggregating and reporting on duplicate\n OpsItems.
\nThe type of OpsItem. Systems Manager supports the following types of OpsItems:
\n\n /aws/issue
\n
This type of OpsItem is used for default OpsItems created by OpsCenter.
\n\n /aws/changerequest
\n
This type of OpsItem is used by Change Manager for reviewing and approving or rejecting change\n requests.
\n\n /aws/insight
\n
This type of OpsItem is used by OpsCenter for aggregating and reporting on duplicate\n OpsItems.
\nUpdate the information about the OpsItem. Provide enough information so that users reading\n this OpsItem for the first time understand the issue.
" + "smithy.api#documentation": "User-defined text that contains information about the OpsItem, in Markdown format.
" } }, "OperationalData": { diff --git a/codegen/sdk/aws-models/sso-oidc.json b/codegen/sdk/aws-models/sso-oidc.json index e71cdd4a717..ee4a8bbaad0 100644 --- a/codegen/sdk/aws-models/sso-oidc.json +++ b/codegen/sdk/aws-models/sso-oidc.json @@ -281,6 +281,31 @@ ], "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-us-gov", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://oidc.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [], "endpoint": { @@ -754,7 +779,7 @@ "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://oidc-fips.us-gov-east-1.amazonaws.com" + "url": "https://oidc.us-gov-east-1.amazonaws.com" } }, "params": { diff --git a/codegen/sdk/aws-models/textract.json b/codegen/sdk/aws-models/textract.json index 5c928414d33..99439ff72da 100644 --- a/codegen/sdk/aws-models/textract.json +++ b/codegen/sdk/aws-models/textract.json @@ -101,7 +101,7 @@ "FeatureTypes": { "target": "com.amazonaws.textract#FeatureTypes", "traits": { - "smithy.api#documentation": "A list of the types of analysis to perform. Add TABLES to the list to return information\n about the tables that are detected in the input document. Add FORMS to return detected form\n data. Add SIGNATURES to return the locations of detected signatures. To perform both forms \n and table analysis, add TABLES and FORMS to FeatureTypes
. To detect signatures within\n form data and table data, add SIGNATURES to either TABLES or FORMS.\n All lines and words detected in the document are included in the response (including text\n that isn't related to the value of FeatureTypes
).
A list of the types of analysis to perform. Add TABLES to the list to return information\n about the tables that are detected in the input document. Add FORMS to return detected form\n data. Add SIGNATURES to return the locations of detected signatures. Add LAYOUT to the list\n to return information about the layout of the document. To perform both forms\n and table analysis, add TABLES and FORMS to FeatureTypes
. To detect signatures\n within the document and within form data and table data, add SIGNATURES to either TABLES or\n FORMS. All lines and words detected in the document are included in the response (including\n text that isn't related to the value of FeatureTypes
).
The type of text item that's recognized. In operations for text detection, the following\n types are returned:
\n\n PAGE - Contains a list of the LINE Block
objects\n that are detected on a document page.
\n WORD - A word detected on a document page. A word is one or\n more ISO basic Latin script characters that aren't separated by spaces.
\n\n LINE - A string of tab-delimited, contiguous words that are\n detected on a document page.
\nIn text analysis operations, the following types are returned:
\n\n PAGE - Contains a list of child Block
objects\n that are detected on a document page.
\n KEY_VALUE_SET - Stores the KEY and VALUE Block
\n objects for linked text that's detected on a document page. Use the\n EntityType
field to determine if a KEY_VALUE_SET object is a KEY\n Block
object or a VALUE Block
object.
\n WORD - A word that's detected on a document page. A word is\n one or more ISO basic Latin script characters that aren't separated by spaces.
\n\n LINE - A string of tab-delimited, contiguous words that are\n detected on a document page.
\n\n TABLE - A table that's detected on a document page. A table\n is grid-based information with two or more rows or columns, with a cell span of one\n row and one column each.
\n\n TABLE_TITLE - The title of a table. A title is typically a\n line of text above or below a table, or embedded as the first row of a table.
\n\n TABLE_FOOTER - The footer associated with a table. A footer\n is typically a line or lines of text below a table or embedded as the last row of a\n table.
\n\n CELL - A cell within a detected table. The cell is the parent\n of the block that contains the text in the cell.
\n\n MERGED_CELL - A cell in a table whose content spans more than\n one row or column. The Relationships
array for this cell contain data\n from individual cells.
\n SELECTION_ELEMENT - A selection element such as an option\n button (radio button) or a check box that's detected on a document page. Use the\n value of SelectionStatus
to determine the status of the selection\n element.
\n SIGNATURE - The location and confidene score of a signature detected on a\n document page. Can be returned as part of a Key-Value pair or a detected cell.
\n\n QUERY - A question asked during the call of AnalyzeDocument. Contains an\n alias and an ID that attaches it to its answer.
\n\n QUERY_RESULT - A response to a question asked during the call\n of analyze document. Comes with an alias and ID for ease of locating in a \n response. Also contains location and confidence score.
\nThe type of text item that's recognized. In operations for text detection, the following\n types are returned:
\n\n PAGE - Contains a list of the LINE Block
objects\n that are detected on a document page.
\n WORD - A word detected on a document page. A word is one or\n more ISO basic Latin script characters that aren't separated by spaces.
\n\n LINE - A string of tab-delimited, contiguous words that are\n detected on a document page.
\nIn text analysis operations, the following types are returned:
\n\n PAGE - Contains a list of child Block
objects\n that are detected on a document page.
\n KEY_VALUE_SET - Stores the KEY and VALUE Block
\n objects for linked text that's detected on a document page. Use the\n EntityType
field to determine if a KEY_VALUE_SET object is a KEY\n Block
object or a VALUE Block
object.
\n WORD - A word that's detected on a document page. A word is\n one or more ISO basic Latin script characters that aren't separated by spaces.
\n\n LINE - A string of tab-delimited, contiguous words that are\n detected on a document page.
\n\n TABLE - A table that's detected on a document page. A table\n is grid-based information with two or more rows or columns, with a cell span of one\n row and one column each.
\n\n TABLE_TITLE - The title of a table. A title is typically a\n line of text above or below a table, or embedded as the first row of a table.
\n\n TABLE_FOOTER - The footer associated with a table. A footer\n is typically a line or lines of text below a table or embedded as the last row of a\n table.
\n\n CELL - A cell within a detected table. The cell is the parent\n of the block that contains the text in the cell.
\n\n MERGED_CELL - A cell in a table whose content spans more than\n one row or column. The Relationships
array for this cell contain data\n from individual cells.
\n SELECTION_ELEMENT - A selection element such as an option\n button (radio button) or a check box that's detected on a document page. Use the\n value of SelectionStatus
to determine the status of the selection\n element.
\n SIGNATURE - The location and confidence score of a signature detected on a\n document page. Can be returned as part of a Key-Value pair or a detected cell.
\n\n QUERY - A question asked during the call of AnalyzeDocument. Contains an\n alias and an ID that attaches it to its answer.
\n\n QUERY_RESULT - A response to a question asked during the call\n of analyze document. Comes with an alias and ID for ease of locating in a \n response. Also contains location and confidence score.
\nThe page on which a block was detected. Page
is returned by synchronous and\n asynchronous operations. Page values greater than 1 are only returned for multipage\n documents that are in PDF or TIFF format. A scanned image (JPEG/PNG) provided to an\n asynchronous operation, even if it contains multiple document pages, is considered a\n single-page document. This means that for scanned images the value of Page
is\n always 1. Synchronous operations will also return a Page
value of 1\n because every input document is considered to be a single-page document.
The page on which a block was detected. Page
is returned by synchronous and\n asynchronous operations. Page values greater than 1 are only returned for multipage\n documents that are in PDF or TIFF format. A scanned image (JPEG/PNG) provided to an\n asynchronous operation, even if it contains multiple document pages, is considered a\n single-page document. This means that for scanned images the value of Page
is\n always 1.
The path of the account creation endpoint for your application. This is the page on your website that accepts the completed registration form for a new user. This page must accept POST
requests.
For example, for the URL https://example.com/web/signup
, you would provide the path /web/signup
.
The path of the account creation endpoint for your application. This is the page on your website that accepts the completed registration form for a new user. This page must accept POST
requests.
For example, for the URL https://example.com/web/newaccount
, you would provide\n\tthe path /web/newaccount
. Account creation page paths that\n\tstart with the path that you provide are considered a match. For example\n\t/web/newaccount
matches the account creation paths\n\t\t/web/newaccount
, /web/newaccount/
,\n\t\t/web/newaccountPage
, and\n\t\t/web/newaccount/thisPage
, but doesn't match the path\n\t\t/home/web/newaccount
or\n\t\t/website/newaccount
.
The path of the account registration endpoint for your application. This is the page on your website that presents the registration form to new users.
\nThis page must accept GET
text/html requests.
For example, for the URL https://example.com/web/register
, you would provide the path /web/register
.
The path of the account registration endpoint for your application. This is the page on your website that presents the registration form to new users.
\nThis page must accept GET
text/html requests.
For example, for the URL https://example.com/web/registration
, you would provide\n\tthe path /web/registration
. Registration page paths that\n\tstart with the path that you provide are considered a match. For example\n\t /web/registration
matches the registration paths\n\t /web/registration
, /web/registration/
,\n\t /web/registrationPage
, and\n\t /web/registration/thisPage
, but doesn't match the path\n\t /home/web/registration
or\n\t /website/registration
.
The path of the login endpoint for your application. For example, for the URL\n https://example.com/web/login
, you would provide the path\n /web/login
.
The rule group inspects only HTTP POST
requests to your specified login endpoint.
The path of the login endpoint for your application. For example, for the URL\n https://example.com/web/login
, you would provide the path\n /web/login
. Login paths that start with the path that you provide are considered a match. For example /web/login
matches the login paths /web/login
, /web/login/
, /web/loginPage
, and /web/login/thisPage
, but doesn't match the login path /home/web/login
or /website/login
.
The rule group inspects only HTTP POST
requests to your specified login endpoint.
A string value that you want WAF to search for. WAF searches only in the part of\n web requests that you designate for inspection in FieldToMatch. The\n maximum length of the value is 200 bytes.
\nValid values depend on the component that you specify for inspection in\n FieldToMatch
:
\n Method
: The HTTP method that you want WAF to search for. This\n indicates the type of operation specified in the request.
\n UriPath
: The value that you want WAF to search for in the URI path,\n for example, /images/daily-ad.jpg
.
\n HeaderOrder
: The comma-separated list of header names to match for. WAF creates a \n string that contains the ordered list of header names, from the headers in the web request, and then matches against that string.
If SearchString
includes alphabetic characters A-Z and a-z, note that the\n value is case sensitive.
\n If you're using the WAF API\n
\nSpecify a base64-encoded version of the value. The maximum length of the value before\n you base64-encode it is 200 bytes.
\nFor example, suppose the value of Type
is HEADER
and the value\n of Data
is User-Agent
. If you want to search the\n User-Agent
header for the value BadBot
, you base64-encode\n BadBot
using MIME base64-encoding and include the resulting value,\n QmFkQm90
, in the value of SearchString
.
\n If you're using the CLI or one of the Amazon Web Services SDKs\n
\nThe value that you want WAF to search for. The SDK automatically base64 encodes the\n value.
", + "smithy.api#documentation": "A string value that you want WAF to search for. WAF searches only in the part of\n web requests that you designate for inspection in FieldToMatch. The\n maximum length of the value is 200 bytes.
\nValid values depend on the component that you specify for inspection in\n FieldToMatch
:
\n Method
: The HTTP method that you want WAF to search for. This\n indicates the type of operation specified in the request.
\n UriPath
: The value that you want WAF to search for in the URI path,\n for example, /images/daily-ad.jpg
.
\n JA3Fingerprint
: The string to match against the web request's JA3 fingerprint header. The header contains a hash fingerprint of the TLS Client Hello packet for \n the request.
\n HeaderOrder
: The comma-separated list of header names to match for. WAF creates a \n string that contains the ordered list of header names, from the headers in the web request, and then matches against that string.
If SearchString
includes alphabetic characters A-Z and a-z, note that the\n value is case sensitive.
\n If you're using the WAF API\n
\nSpecify a base64-encoded version of the value. The maximum length of the value before\n you base64-encode it is 200 bytes.
\nFor example, suppose the value of Type
is HEADER
and the value\n of Data
is User-Agent
. If you want to search the\n User-Agent
header for the value BadBot
, you base64-encode\n BadBot
using MIME base64-encoding and include the resulting value,\n QmFkQm90
, in the value of SearchString
.
\n If you're using the CLI or one of the Amazon Web Services SDKs\n
\nThe value that you want WAF to search for. The SDK automatically base64 encodes the\n value.
", "smithy.api#required": {} } }, @@ -1888,7 +1888,7 @@ "TextTransformations": { "target": "com.amazonaws.wafv2#TextTransformations", "traits": { - "smithy.api#documentation": "Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the transformed component contents.
Contains an array of strings that specifies zero or more IP addresses or blocks of IP addresses. All addresses must be specified using Classless Inter-Domain Routing (CIDR) notation. WAF supports all IPv4 and IPv6 CIDR ranges except for /0
.
Example address strings:
\nTo configure WAF to allow, block, or count requests that originated from the IP address 192.0.2.44, specify 192.0.2.44/32
.
To configure WAF to allow, block, or count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify \n 192.0.2.0/24
.
To configure WAF to allow, block, or count requests that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify 1111:0000:0000:0000:0000:0000:0000:0111/128
.
To configure WAF to allow, block, or count requests that originated from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify 1111:0000:0000:0000:0000:0000:0000:0000/64
.
For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.
\nExample JSON Addresses
specifications:
Empty array: \"Addresses\": []
\n
Array with one address: \"Addresses\": [\"192.0.2.44/32\"]
\n
Array with three addresses: \"Addresses\": [\"192.0.2.44/32\", \"192.0.2.0/24\", \"192.0.0.0/16\"]
\n
INVALID specification: \"Addresses\": [\"\"]
INVALID
Contains an array of strings that specifies zero or more IP addresses or blocks of IP addresses that you want WAF to inspect for in incoming requests. All addresses must be specified using Classless Inter-Domain Routing (CIDR) notation. WAF supports all IPv4 and IPv6 CIDR ranges except for /0
.
Example address strings:
\nFor requests that originated from the IP address 192.0.2.44, specify 192.0.2.44/32
.
For requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify \n 192.0.2.0/24
.
For requests that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify 1111:0000:0000:0000:0000:0000:0000:0111/128
.
For requests that originated from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify 1111:0000:0000:0000:0000:0000:0000:0000/64
.
For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.
\nExample JSON Addresses
specifications:
Empty array: \"Addresses\": []
\n
Array with one address: \"Addresses\": [\"192.0.2.44/32\"]
\n
Array with three addresses: \"Addresses\": [\"192.0.2.44/32\", \"192.0.2.0/24\", \"192.0.0.0/16\"]
\n
INVALID specification: \"Addresses\": [\"\"]
INVALID
The Rule statements used to identify the web requests that you \n want to allow, block, or count. Each rule includes one top-level statement that WAF uses to identify matching \n web requests, and parameters that govern how WAF handles them. \n
" + "smithy.api#documentation": "The Rule statements used to identify the web requests that you \n want to manage. Each rule includes one top-level statement that WAF uses to identify matching \n web requests, and parameters that govern how WAF handles them. \n
" } }, "VisibilityConfig": { @@ -4210,7 +4210,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a WebACL per the specifications provided.
\nA web ACL defines a collection of rules to use to inspect and control web requests. Each rule has an action defined (allow, block, or count) for requests that match the statement of the rule. In the web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one or more Amazon Web Services resources to protect. The resources can be an Amazon CloudFront distribution, an Amazon API Gateway REST API, an Application Load Balancer, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance.
" + "smithy.api#documentation": "Creates a WebACL per the specifications provided.
\nA web ACL defines a collection of rules to use to inspect and control web requests. Each rule has a statement that defines what to look for in web requests and an action that WAF applies to requests that match the statement. In the web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one or more Amazon Web Services resources to protect. The resources can be an Amazon CloudFront distribution, an Amazon API Gateway REST API, an Application Load Balancer, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance.
" } }, "com.amazonaws.wafv2#CreateWebACLRequest": { @@ -4246,7 +4246,7 @@ "Rules": { "target": "com.amazonaws.wafv2#Rules", "traits": { - "smithy.api#documentation": "The Rule statements used to identify the web requests that you \n want to allow, block, or count. Each rule includes one top-level statement that WAF uses to identify matching \n web requests, and parameters that govern how WAF handles them. \n
" + "smithy.api#documentation": "The Rule statements used to identify the web requests that you \n want to manage. Each rule includes one top-level statement that WAF uses to identify matching \n web requests, and parameters that govern how WAF handles them. \n
" } }, "VisibilityConfig": { @@ -4408,7 +4408,7 @@ "ResponseHeaders": { "target": "com.amazonaws.wafv2#CustomHTTPHeaders", "traits": { - "smithy.api#documentation": "The HTTP headers to use in the response. Duplicate header names are not allowed.
\nFor information about the limits on count and size for custom request and response settings, see WAF quotas \n in the WAF Developer Guide.
" + "smithy.api#documentation": "The HTTP headers to use in the response. You can specify any header name except for content-type
. Duplicate header names are not allowed.
For information about the limits on count and size for custom request and response settings, see WAF quotas \n in the WAF Developer Guide.
" } } }, @@ -5456,6 +5456,12 @@ "traits": { "smithy.api#documentation": "Inspect a string containing the list of the request's header names, ordered as they appear in the web request\nthat WAF receives for inspection. \n WAF generates the string and then uses that as the field to match component in its inspection. \n WAF separates the header names in the string using colons and no added spaces, for example host:user-agent:accept:authorization:referer
.
Match against the request's JA3 fingerprint header. The header contains a hash fingerprint of the TLS Client Hello packet for the request.
\nYou can use this choice only with a string match ByteMatchStatement
with the PositionalConstraint
set to \n EXACTLY
.
Contains an array of strings that specifies zero or more IP addresses or blocks of IP addresses. All addresses must be specified using Classless Inter-Domain Routing (CIDR) notation. WAF supports all IPv4 and IPv6 CIDR ranges except for /0
.
Example address strings:
\nTo configure WAF to allow, block, or count requests that originated from the IP address 192.0.2.44, specify 192.0.2.44/32
.
To configure WAF to allow, block, or count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify \n 192.0.2.0/24
.
To configure WAF to allow, block, or count requests that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify 1111:0000:0000:0000:0000:0000:0000:0111/128
.
To configure WAF to allow, block, or count requests that originated from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify 1111:0000:0000:0000:0000:0000:0000:0000/64
.
For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.
\nExample JSON Addresses
specifications:
Empty array: \"Addresses\": []
\n
Array with one address: \"Addresses\": [\"192.0.2.44/32\"]
\n
Array with three addresses: \"Addresses\": [\"192.0.2.44/32\", \"192.0.2.0/24\", \"192.0.0.0/16\"]
\n
INVALID specification: \"Addresses\": [\"\"]
INVALID
Contains an array of strings that specifies zero or more IP addresses or blocks of IP addresses that you want WAF to inspect for in incoming requests. All addresses must be specified using Classless Inter-Domain Routing (CIDR) notation. WAF supports all IPv4 and IPv6 CIDR ranges except for /0
.
Example address strings:
\nFor requests that originated from the IP address 192.0.2.44, specify 192.0.2.44/32
.
For requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify \n 192.0.2.0/24
.
For requests that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify 1111:0000:0000:0000:0000:0000:0000:0111/128
.
For requests that originated from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify 1111:0000:0000:0000:0000:0000:0000:0000/64
.
For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.
\nExample JSON Addresses
specifications:
Empty array: \"Addresses\": []
\n
Array with one address: \"Addresses\": [\"192.0.2.44/32\"]
\n
Array with three addresses: \"Addresses\": [\"192.0.2.44/32\", \"192.0.2.0/24\", \"192.0.0.0/16\"]
\n
INVALID specification: \"Addresses\": [\"\"]
INVALID
The match status to assign to the web request if the request doesn't have a JA3 fingerprint.
\nYou can specify the following fallback behaviors:
\n\n MATCH
- Treat the web request as matching the rule statement. WAF applies the rule action to the request.
\n NO_MATCH
- Treat the web request as not matching the rule statement.
Match against the request's JA3 fingerprint header. The header contains a hash fingerprint of the TLS Client Hello packet for the request.
\nYou can use this choice only with a string match ByteMatchStatement
with the PositionalConstraint
set to \n EXACTLY
.
A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.
\nYou cannot nest a ManagedRuleGroupStatement
, for example for use inside a NotStatement
or OrStatement
. It can only be referenced as a top-level statement within a rule.
You are charged additional fees when you use the WAF Bot Control managed rule group AWSManagedRulesBotControlRuleSet
, the WAF Fraud Control account takeover prevention (ATP) managed rule group AWSManagedRulesATPRuleSet
, or the WAF Fraud Control account creation fraud prevention (ACFP) managed rule group AWSManagedRulesACFPRuleSet
. For more information, see WAF Pricing.
A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.
\nYou cannot nest a ManagedRuleGroupStatement
, for example for use inside a NotStatement
or OrStatement
. You cannot use a managed rule group \n inside another rule group. You can only reference a managed rule group as a top-level statement within a rule that you define in a web ACL.
You are charged additional fees when you use the WAF Bot Control managed rule group AWSManagedRulesBotControlRuleSet
, the WAF Fraud Control account takeover prevention (ATP) managed rule group AWSManagedRulesATPRuleSet
, or the WAF Fraud Control account creation fraud prevention (ACFP) managed rule group AWSManagedRulesACFPRuleSet
. For more information, see WAF Pricing.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the transformed component contents.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the transformed component contents.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the transformed component contents.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the transformed component contents.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the transformed component contents.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the transformed component contents.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the transformed component contents.
A single rule, which you can use in a WebACL or RuleGroup to identify web requests that you want to allow, block, or count.\n Each rule includes one top-level Statement that WAF uses to\n identify matching web requests, and parameters that govern how WAF handles them.
" + "smithy.api#documentation": "A single rule, which you can use in a WebACL or RuleGroup to identify web requests that you want to manage in some way. \n Each rule includes one top-level Statement that WAF uses to\n identify matching web requests, and parameters that govern how WAF handles them.
" } }, "com.amazonaws.wafv2#RuleAction": { @@ -10743,7 +10764,7 @@ "Rules": { "target": "com.amazonaws.wafv2#Rules", "traits": { - "smithy.api#documentation": "The Rule statements used to identify the web requests that you \n want to allow, block, or count. Each rule includes one top-level statement that WAF uses to identify matching \n web requests, and parameters that govern how WAF handles them. \n
" + "smithy.api#documentation": "The Rule statements used to identify the web requests that you \n want to manage. Each rule includes one top-level statement that WAF uses to identify matching \n web requests, and parameters that govern how WAF handles them. \n
" } }, "VisibilityConfig": { @@ -10806,7 +10827,7 @@ } }, "traits": { - "smithy.api#documentation": "A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
\nYou cannot nest a RuleGroupReferenceStatement
, for example for use inside a NotStatement
or OrStatement
. You \n can only use a rule group reference statement at the top level inside a web ACL.
A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
\nYou cannot nest a RuleGroupReferenceStatement
, for example for use inside a NotStatement
or OrStatement
. You cannot use a rule group\n reference statement inside another rule group. You can only reference a rule group as a top-level statement within a rule that you define in a web ACL.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the transformed component contents.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the transformed component contents.
A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
\nYou cannot nest a RuleGroupReferenceStatement
, for example for use inside a NotStatement
or OrStatement
. You \n can only use a rule group reference statement at the top level inside a web ACL.
A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
\nYou cannot nest a RuleGroupReferenceStatement
, for example for use inside a NotStatement
or OrStatement
. You cannot use a rule group\n reference statement inside another rule group. You can only reference a rule group as a top-level statement within a rule that you define in a web ACL.
A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.
\nYou cannot nest a ManagedRuleGroupStatement
, for example for use inside a NotStatement
or OrStatement
. It can only be referenced as a top-level statement within a rule.
You are charged additional fees when you use the WAF Bot Control managed rule group AWSManagedRulesBotControlRuleSet
, the WAF Fraud Control account takeover prevention (ATP) managed rule group AWSManagedRulesATPRuleSet
, or the WAF Fraud Control account creation fraud prevention (ACFP) managed rule group AWSManagedRulesACFPRuleSet
. For more information, see WAF Pricing.
A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups.
\nYou cannot nest a ManagedRuleGroupStatement
, for example for use inside a NotStatement
or OrStatement
. You cannot use a managed rule group \n inside another rule group. You can only reference a managed rule group as a top-level statement within a rule that you define in a web ACL.
You are charged additional fees when you use the WAF Bot Control managed rule group AWSManagedRulesBotControlRuleSet
, the WAF Fraud Control account takeover prevention (ATP) managed rule group AWSManagedRulesATPRuleSet
, or the WAF Fraud Control account creation fraud prevention (ACFP) managed rule group AWSManagedRulesACFPRuleSet
. For more information, see WAF Pricing.
You can specify the following transformation types:
\n\n BASE64_DECODE - Decode a\n Base64
-encoded string.
\n BASE64_DECODE_EXT - Decode a\n Base64
-encoded string, but use a forgiving implementation that ignores\n characters that aren't valid.
\n CMD_LINE - Command-line transformations. These are\n helpful in reducing effectiveness of attackers who inject an operating system command-line \n command and use unusual formatting to disguise some or all of the command.
\nDelete the following characters: \\ \" ' ^
\n
Delete spaces before the following characters: / (
\n
Replace the following characters with a space: , ;
\n
Replace multiple spaces with one space
\nConvert uppercase letters (A-Z) to lowercase (a-z)
\n\n COMPRESS_WHITE_SPACE - Replace these characters\n with a space character (decimal 32):
\n\n \\f
, formfeed, decimal 12
\n \\t
, tab, decimal 9
\n \\n
, newline, decimal 10
\n \\r
, carriage return, decimal 13
\n \\v
, vertical tab, decimal 11
Non-breaking space, decimal 160
\n\n COMPRESS_WHITE_SPACE
also replaces multiple spaces with one space.
\n CSS_DECODE - Decode characters that were encoded\n using CSS 2.x escape rules syndata.html#characters
. This function uses up to\n two bytes in the decoding process, so it can help to uncover ASCII characters that were\n encoded using CSS encoding that wouldn’t typically be encoded. It's also useful in\n countering evasion, which is a combination of a backslash and non-hexadecimal characters.\n For example, ja\\vascript
for javascript.
\n ESCAPE_SEQ_DECODE - Decode the following ANSI C\n escape sequences: \\a
, \\b
, \\f
, \\n
,\n \\r
, \\t
, \\v
, \\\\
, \\?
,\n \\'
, \\\"
, \\xHH
(hexadecimal), \\0OOO
\n (octal). Encodings that aren't valid remain in the output.
\n HEX_DECODE - Decode a string of hexadecimal\n characters into a binary.
\n\n HTML_ENTITY_DECODE - Replace HTML-encoded\n characters with unencoded characters. HTML_ENTITY_DECODE
performs these\n operations:
Replaces (ampersand)quot;
with \"
\n
Replaces (ampersand)nbsp;
with a non-breaking space, decimal\n 160
Replaces (ampersand)lt;
with a \"less than\" symbol
Replaces (ampersand)gt;
with >
\n
Replaces characters that are represented in hexadecimal format,\n (ampersand)#xhhhh;
, with the corresponding characters
Replaces characters that are represented in decimal format,\n (ampersand)#nnnn;
, with the corresponding characters
\n JS_DECODE - Decode JavaScript escape sequences. If\n a\n \\
\n u
\n HHHH
\n code is in the full-width ASCII code range of FF01-FF5E
, then the higher byte\n is used to detect and adjust the lower byte. If not, only the lower byte is used and the\n higher byte is zeroed, causing a possible loss of information.
\n LOWERCASE - Convert uppercase letters (A-Z) to\n lowercase (a-z).
\n\n MD5 - Calculate an MD5 hash from the data in the\n input. The computed hash is in a raw binary form.
\n\n NONE - Specify NONE
if you don't want\n any text transformations.
\n NORMALIZE_PATH - Remove multiple slashes, directory\n self-references, and directory back-references that are not at the beginning of the input\n from an input string.
\n\n NORMALIZE_PATH_WIN - This is the same as\n NORMALIZE_PATH
, but first converts backslash characters to forward slashes.
\n REMOVE_NULLS - Remove all NULL
bytes\n from the input.
\n REPLACE_COMMENTS - Replace each occurrence of a\n C-style comment (/* ... */
) with a single space. Multiple consecutive\n occurrences are not compressed. Unterminated comments are also replaced with a space (ASCII\n 0x20). However, a standalone termination of a comment (*/
) is not acted upon.
\n REPLACE_NULLS - Replace NULL bytes in the input\n with space characters (ASCII 0x20
).
\n SQL_HEX_DECODE - Decode SQL hex data. Example\n (0x414243
) will be decoded to (ABC
).
\n URL_DECODE - Decode a URL-encoded value.
\n\n URL_DECODE_UNI - Like URL_DECODE
, but\n with support for Microsoft-specific %u
encoding. If the code is in the\n full-width ASCII code range of FF01-FF5E
, the higher byte is used to detect\n and adjust the lower byte. Otherwise, only the lower byte is used and the higher byte is\n zeroed.
\n UTF8_TO_UNICODE - Convert all UTF-8 character\n sequences to Unicode. This helps input normalization, and minimizing false-positives and\n false-negatives for non-English languages.
", + "smithy.api#documentation": "For detailed descriptions of each of the transformation types, see Text transformations \n in the WAF Developer Guide.
", "smithy.api#required": {} } } @@ -11809,7 +11830,7 @@ "Addresses": { "target": "com.amazonaws.wafv2#IPAddresses", "traits": { - "smithy.api#documentation": "Contains an array of strings that specifies zero or more IP addresses or blocks of IP addresses. All addresses must be specified using Classless Inter-Domain Routing (CIDR) notation. WAF supports all IPv4 and IPv6 CIDR ranges except for /0
.
Example address strings:
\nTo configure WAF to allow, block, or count requests that originated from the IP address 192.0.2.44, specify 192.0.2.44/32
.
To configure WAF to allow, block, or count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify \n 192.0.2.0/24
.
To configure WAF to allow, block, or count requests that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify 1111:0000:0000:0000:0000:0000:0000:0111/128
.
To configure WAF to allow, block, or count requests that originated from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify 1111:0000:0000:0000:0000:0000:0000:0000/64
.
For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.
\nExample JSON Addresses
specifications:
Empty array: \"Addresses\": []
\n
Array with one address: \"Addresses\": [\"192.0.2.44/32\"]
\n
Array with three addresses: \"Addresses\": [\"192.0.2.44/32\", \"192.0.2.0/24\", \"192.0.0.0/16\"]
\n
INVALID specification: \"Addresses\": [\"\"]
INVALID
Contains an array of strings that specifies zero or more IP addresses or blocks of IP addresses that you want WAF to inspect for in incoming requests. All addresses must be specified using Classless Inter-Domain Routing (CIDR) notation. WAF supports all IPv4 and IPv6 CIDR ranges except for /0
.
Example address strings:
\nFor requests that originated from the IP address 192.0.2.44, specify 192.0.2.44/32
.
For requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify \n 192.0.2.0/24
.
For requests that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify 1111:0000:0000:0000:0000:0000:0000:0111/128
.
For requests that originated from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify 1111:0000:0000:0000:0000:0000:0000:0000/64
.
For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing.
\nExample JSON Addresses
specifications:
Empty array: \"Addresses\": []
\n
Array with one address: \"Addresses\": [\"192.0.2.44/32\"]
\n
Array with three addresses: \"Addresses\": [\"192.0.2.44/32\", \"192.0.2.0/24\", \"192.0.0.0/16\"]
\n
INVALID specification: \"Addresses\": [\"\"]
INVALID
The Rule statements used to identify the web requests that you \n want to allow, block, or count. Each rule includes one top-level statement that WAF uses to identify matching \n web requests, and parameters that govern how WAF handles them. \n
" + "smithy.api#documentation": "The Rule statements used to identify the web requests that you \n want to manage. Each rule includes one top-level statement that WAF uses to identify matching \n web requests, and parameters that govern how WAF handles them. \n
" } }, "VisibilityConfig": { @@ -12208,7 +12229,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates the specified WebACL. While updating a web ACL, WAF provides\n continuous coverage to the resources that you have associated with the web ACL.
\nThis operation completely replaces the mutable specifications that you already have for the web ACL with the ones that you provide to this call.
\nTo modify a web ACL, do the following:
\nRetrieve it by calling GetWebACL\n
\nUpdate its settings as needed
\nProvide the complete web ACL specification to this call
\nWhen you make changes to web ACLs or web ACL components, like rules and rule groups, WAF propagates the changes everywhere that the web ACL and its components are stored and used. Your changes are applied within seconds, but there might be a brief period of inconsistency when the changes have arrived in some places and not in others. So, for example, if you change a rule action setting, the action might be the old action in one area and the new action in another area. Or if you add an IP address to an IP set used in a blocking rule, the new address might briefly be blocked in one area while still allowed in another. This temporary inconsistency can occur when you first associate a web ACL with an Amazon Web Services resource and when you change a web ACL that is already associated with a resource. Generally, any inconsistencies of this type last only a few seconds.
\nA web ACL defines a collection of rules to use to inspect and control web requests. Each rule has an action defined (allow, block, or count) for requests that match the statement of the rule. In the web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one or more Amazon Web Services resources to protect. The resources can be an Amazon CloudFront distribution, an Amazon API Gateway REST API, an Application Load Balancer, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance.
" + "smithy.api#documentation": "Updates the specified WebACL. While updating a web ACL, WAF provides\n continuous coverage to the resources that you have associated with the web ACL.
\nThis operation completely replaces the mutable specifications that you already have for the web ACL with the ones that you provide to this call.
\nTo modify a web ACL, do the following:
\nRetrieve it by calling GetWebACL\n
\nUpdate its settings as needed
\nProvide the complete web ACL specification to this call
\nWhen you make changes to web ACLs or web ACL components, like rules and rule groups, WAF propagates the changes everywhere that the web ACL and its components are stored and used. Your changes are applied within seconds, but there might be a brief period of inconsistency when the changes have arrived in some places and not in others. So, for example, if you change a rule action setting, the action might be the old action in one area and the new action in another area. Or if you add an IP address to an IP set used in a blocking rule, the new address might briefly be blocked in one area while still allowed in another. This temporary inconsistency can occur when you first associate a web ACL with an Amazon Web Services resource and when you change a web ACL that is already associated with a resource. Generally, any inconsistencies of this type last only a few seconds.
\nA web ACL defines a collection of rules to use to inspect and control web requests. Each rule has a statement that defines what to look for in web requests and an action that WAF applies to requests that match the statement. In the web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one or more Amazon Web Services resources to protect. The resources can be an Amazon CloudFront distribution, an Amazon API Gateway REST API, an Application Load Balancer, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance.
" } }, "com.amazonaws.wafv2#UpdateWebACLRequest": { @@ -12251,7 +12272,7 @@ "Rules": { "target": "com.amazonaws.wafv2#Rules", "traits": { - "smithy.api#documentation": "The Rule statements used to identify the web requests that you \n want to allow, block, or count. Each rule includes one top-level statement that WAF uses to identify matching \n web requests, and parameters that govern how WAF handles them. \n
" + "smithy.api#documentation": "The Rule statements used to identify the web requests that you \n want to manage. Each rule includes one top-level statement that WAF uses to identify matching \n web requests, and parameters that govern how WAF handles them. \n
" } }, "VisibilityConfig": { @@ -12705,7 +12726,7 @@ "Rules": { "target": "com.amazonaws.wafv2#Rules", "traits": { - "smithy.api#documentation": "The Rule statements used to identify the web requests that you \n want to allow, block, or count. Each rule includes one top-level statement that WAF uses to identify matching \n web requests, and parameters that govern how WAF handles them. \n
" + "smithy.api#documentation": "The Rule statements used to identify the web requests that you \n want to manage. Each rule includes one top-level statement that WAF uses to identify matching \n web requests, and parameters that govern how WAF handles them. \n
" } }, "VisibilityConfig": { @@ -12779,7 +12800,7 @@ } }, "traits": { - "smithy.api#documentation": "A web ACL defines a collection of rules to use to inspect and control web requests. Each rule has an action defined (allow, block, or count) for requests that match the statement of the rule. In the web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one or more Amazon Web Services resources to protect. The resources can be an Amazon CloudFront distribution, an Amazon API Gateway REST API, an Application Load Balancer, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance.
" + "smithy.api#documentation": "A web ACL defines a collection of rules to use to inspect and control web requests. Each rule has a statement that defines what to look for in web requests and an action that WAF applies to requests that match the statement. In the web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one or more Amazon Web Services resources to protect. The resources can be an Amazon CloudFront distribution, an Amazon API Gateway REST API, an Application Load Balancer, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance.
" } }, "com.amazonaws.wafv2#WebACLSummaries": { @@ -12839,7 +12860,7 @@ "TextTransformations": { "target": "com.amazonaws.wafv2#TextTransformations", "traits": { - "smithy.api#documentation": "Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the component contents.
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. Text transformations are used in rule match statements, to transform the FieldToMatch
request component before inspecting it, and they're used in rate-based rule statements, to transform request components before using them as custom aggregation keys. If you specify one or more transformations to apply, WAF performs all transformations on the specified content, starting from the lowest priority setting, and then uses the transformed component contents.
The resource for which members (users or groups) are associated.
", + "smithy.api#documentation": "The resource for which members (users or groups) are associated.
\nThe identifier can accept ResourceId, Resourcename, or email. The following identity formats are available:
\nResource ID: r-0123456789a0123456789b0123456789
\nEmail address: resource@domain.tld
\nResource name: resource
\nThe member (user or group) to associate to the resource.
", + "smithy.api#documentation": "The member (user or group) to associate to the resource.
\nThe entity ID can accept UserId or GroupID, Username or Groupname, or email.
\nEntity: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: entity@domain.tld
\nEntity: entity
\nThe group to which the member (user or group) is associated.
", + "smithy.api#documentation": "The group to which the member (user or group) is associated.
\nThe identifier can accept GroupId, Groupname, or email. The following identity formats are available:
\nGroup ID: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: group@domain.tld
\nGroup name: group
\nThe member (user or group) to associate to the group.
", + "smithy.api#documentation": "The member (user or group) to associate to the group.
\nThe member ID can accept UserID or GroupId, Username or Groupname, or email.
\nMember: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: member@domain.tld
\nMember name: member
\nThe name of the group.
", "smithy.api#required": {} } + }, + "HiddenFromGlobalAddressList": { + "target": "com.amazonaws.workmail#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "If this parameter is enabled, the group will be hidden from the address book.
" + } } }, "traits": { @@ -1184,6 +1195,9 @@ }, { "target": "com.amazonaws.workmail#ReservedNameException" + }, + { + "target": "com.amazonaws.workmail#UnsupportedOperationException" } ], "traits": { @@ -1214,6 +1228,19 @@ "smithy.api#documentation": "The type of the new resource. The available types are equipment
and\n room
.
Resource description.
" + } + }, + "HiddenFromGlobalAddressList": { + "target": "com.amazonaws.workmail#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "If this parameter is enabled, the resource will be hidden from the address book.
" + } } }, "traits": { @@ -1294,7 +1321,7 @@ } }, "DisplayName": { - "target": "com.amazonaws.workmail#String", + "target": "com.amazonaws.workmail#UserAttribute", "traits": { "smithy.api#documentation": "The display name for the new user.
", "smithy.api#required": {} @@ -1303,8 +1330,32 @@ "Password": { "target": "com.amazonaws.workmail#Password", "traits": { - "smithy.api#documentation": "The password for the new user.
", - "smithy.api#required": {} + "smithy.api#documentation": "The password for the new user.
" + } + }, + "Role": { + "target": "com.amazonaws.workmail#UserRole", + "traits": { + "smithy.api#documentation": "The role of the new user.
\nYou cannot pass SYSTEM_USER or RESOURCE role in a single request. When a user role is not selected, the default role of USER is selected.
" + } + }, + "FirstName": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "The first name of the new user.
" + } + }, + "LastName": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "The last name of the new user.
" + } + }, + "HiddenFromGlobalAddressList": { + "target": "com.amazonaws.workmail#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "If this parameter is enabled, the user will be hidden from the address book.
" } } }, @@ -1606,9 +1657,9 @@ } }, "GroupId": { - "target": "com.amazonaws.workmail#WorkMailIdentifier", + "target": "com.amazonaws.workmail#EntityIdentifier", "traits": { - "smithy.api#documentation": "The identifier of the group to be deleted.
", + "smithy.api#documentation": "The identifier of the group to be deleted.
\nThe identifier can be the GroupId, or Groupname. The following identity formats are available:
\nGroup ID: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234
\nGroup name: group
\nThe identifier of the member (user or group) that owns the mailbox.
", + "smithy.api#documentation": "The identifier of the entity that owns the mailbox.
\nThe identifier can be UserId or Group Id, Username or Groupname, or email.
\nEntity ID: 12345678-1234-1234-1234-123456789012, r-0123456789a0123456789b0123456789, or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: entity@domain.tld
\nEntity name: entity
\nThe identifier of the member (user or group) for which to delete granted\n permissions.
", + "smithy.api#documentation": "The identifier of the entity for which to delete granted permissions.
\nThe identifier can be UserId, ResourceID, or Group Id, Username or Groupname, or email.
\nGrantee ID: 12345678-1234-1234-1234-123456789012,r-0123456789a0123456789b0123456789, or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: grantee@domain.tld
\nGrantee name: grantee
\nIf true, deletes the AWS Directory Service directory associated with the organization.
", "smithy.api#required": {} } + }, + "ForceDelete": { + "target": "com.amazonaws.workmail#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "Deletes a WorkMail organization even if the organization has enabled users.
" + } } }, "traits": { @@ -1950,6 +2008,9 @@ }, { "target": "com.amazonaws.workmail#OrganizationStateException" + }, + { + "target": "com.amazonaws.workmail#UnsupportedOperationException" } ], "traits": { @@ -1968,9 +2029,9 @@ } }, "ResourceId": { - "target": "com.amazonaws.workmail#ResourceId", + "target": "com.amazonaws.workmail#EntityIdentifier", "traits": { - "smithy.api#documentation": "The identifier of the resource to be deleted.
", + "smithy.api#documentation": "The identifier of the resource to be deleted.
\nThe identifier can accept ResourceId, or Resourcename. The following identity formats are available:
\nResource ID: r-0123456789a0123456789b0123456789
\nResource name: resource
\nThe identifier of the user to be deleted.
", + "smithy.api#documentation": "The identifier of the user to be deleted.
\nThe identifier can be the UserId or Username. The following identity formats are available:
\nUser ID: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234
\nUser name: user
\nThe identifier for the member (user or group) to be updated.
", + "smithy.api#documentation": "The identifier for the member to be updated.
\nThe identifier can be UserId, ResourceId, or Group Id, Username, Resourcename, or Groupname, or email.
\nEntity ID: 12345678-1234-1234-1234-123456789012, r-0123456789a0123456789b0123456789, or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: entity@domain.tld
\nEntity name: entity
\nReturns basic details about an entity in WorkMail.
", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workmail#DescribeEntityRequest": { + "type": "structure", + "members": { + "OrganizationId": { + "target": "com.amazonaws.workmail#OrganizationId", + "traits": { + "smithy.api#documentation": "The identifier for the organization under which the entity exists.
", + "smithy.api#required": {} + } + }, + "Email": { + "target": "com.amazonaws.workmail#EmailAddress", + "traits": { + "smithy.api#documentation": "The email under which the entity exists.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.workmail#DescribeEntityResponse": { + "type": "structure", + "members": { + "EntityId": { + "target": "com.amazonaws.workmail#WorkMailIdentifier", + "traits": { + "smithy.api#documentation": "The entity ID under which the entity exists.
" + } + }, + "Name": { + "target": "com.amazonaws.workmail#String", + "traits": { + "smithy.api#documentation": "Username, GroupName, or ResourceName based on entity type.
" + } + }, + "Type": { + "target": "com.amazonaws.workmail#EntityType", + "traits": { + "smithy.api#documentation": "Entity type.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.workmail#DescribeGroup": { "type": "operation", "input": { @@ -2322,9 +2458,9 @@ } }, "GroupId": { - "target": "com.amazonaws.workmail#WorkMailIdentifier", + "target": "com.amazonaws.workmail#EntityIdentifier", "traits": { - "smithy.api#documentation": "The identifier for the group to be described.
", + "smithy.api#documentation": "The identifier for the group to be described.
\nThe identifier can accept GroupId, Groupname, or email. The following identity formats are available:
\nGroup ID: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: group@domain.tld
\nGroup name: group
\nThe date and time when a user was deregistered from WorkMail, in UNIX epoch time\n format.
" } + }, + "HiddenFromGlobalAddressList": { + "target": "com.amazonaws.workmail#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "If the value is set to true, the group is hidden from the address book.
" + } } }, "traits": { @@ -2650,6 +2793,19 @@ "traits": { "smithy.api#documentation": "The Amazon Resource Name (ARN) of the organization.
" } + }, + "MigrationAdmin": { + "target": "com.amazonaws.workmail#WorkMailIdentifier", + "traits": { + "smithy.api#documentation": "The user ID of the migration admin if migration is enabled for the organization.
" + } + }, + "InteroperabilityEnabled": { + "target": "com.amazonaws.workmail#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "Indicates if interoperability is enabled for this organization.
" + } } }, "traits": { @@ -2676,6 +2832,9 @@ }, { "target": "com.amazonaws.workmail#OrganizationStateException" + }, + { + "target": "com.amazonaws.workmail#UnsupportedOperationException" } ], "traits": { @@ -2694,9 +2853,9 @@ } }, "ResourceId": { - "target": "com.amazonaws.workmail#ResourceId", + "target": "com.amazonaws.workmail#EntityIdentifier", "traits": { - "smithy.api#documentation": "The identifier of the resource to be described.
", + "smithy.api#documentation": "The identifier of the resource to be described.
\nThe identifier can accept ResourceId, Resourcename, or email. The following identity formats are available:
\nResource ID: r-0123456789a0123456789b0123456789
\nEmail address: resource@domain.tld
\nResource name: resource
\nThe date and time when a resource was disabled from WorkMail, in UNIX epoch time\n format.
" } + }, + "Description": { + "target": "com.amazonaws.workmail#ResourceDescription", + "traits": { + "smithy.api#documentation": "Description of the resource.
" + } + }, + "HiddenFromGlobalAddressList": { + "target": "com.amazonaws.workmail#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "If enabled, the resource is hidden from the global address list.
" + } } }, "traits": { @@ -2799,9 +2971,9 @@ } }, "UserId": { - "target": "com.amazonaws.workmail#WorkMailIdentifier", + "target": "com.amazonaws.workmail#EntityIdentifier", "traits": { - "smithy.api#documentation": "The identifier for the user to be described.
", + "smithy.api#documentation": "The identifier for the user to be described.
\nThe identifier can be the UserId, Username, or email. The following identity formats are available:
\nUser ID: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: user@domain.tld
\nUser name: user
\nThe display name of the user.
" } @@ -2846,7 +3018,7 @@ "UserRole": { "target": "com.amazonaws.workmail#UserRole", "traits": { - "smithy.api#documentation": "In certain cases, other entities are modeled as users. If interoperability is\n enabled, resources are imported into WorkMail as users. Because different WorkMail\n organizations rely on different directory types, administrators can distinguish between an\n unregistered user (account is disabled and has a user role) and the directory\n administrators. The values are USER, RESOURCE, and SYSTEM_USER.
" + "smithy.api#documentation": "In certain cases, other entities are modeled as users. If interoperability is\n enabled, resources are imported into WorkMail as users. Because different WorkMail\n organizations rely on different directory types, administrators can distinguish between an\n unregistered user (account is disabled and has a user role) and the directory\n administrators. The values are USER, RESOURCE, SYSTEM_USER, and REMOTE_USER.
" } }, "EnabledDate": { @@ -2860,6 +3032,97 @@ "traits": { "smithy.api#documentation": "The date and time at which the user was disabled for WorkMail usage, in UNIX epoch\n time format.
" } + }, + "MailboxProvisionedDate": { + "target": "com.amazonaws.workmail#Timestamp", + "traits": { + "smithy.api#documentation": "The date when the mailbox was created for the user.
" + } + }, + "MailboxDeprovisionedDate": { + "target": "com.amazonaws.workmail#Timestamp", + "traits": { + "smithy.api#documentation": "The date when the mailbox was removed for the user.
" + } + }, + "FirstName": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "First name of the user.
" + } + }, + "LastName": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Last name of the user.
" + } + }, + "HiddenFromGlobalAddressList": { + "target": "com.amazonaws.workmail#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "If enabled, the user is hidden from the global address list.
" + } + }, + "Initials": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Initials of the user.
" + } + }, + "Telephone": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "User's contact number.
" + } + }, + "Street": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Street where the user is located.
" + } + }, + "JobTitle": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Job title of the user.
" + } + }, + "City": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "City where the user is located.
" + } + }, + "Company": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Company of the user.
" + } + }, + "ZipCode": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Zip code of the user.
" + } + }, + "Department": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Department of the user.
" + } + }, + "Country": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Country where the user is located.
" + } + }, + "Office": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Office where the user is located.
" + } } }, "traits": { @@ -3043,6 +3306,9 @@ }, { "target": "com.amazonaws.workmail#OrganizationStateException" + }, + { + "target": "com.amazonaws.workmail#UnsupportedOperationException" } ], "traits": { @@ -3061,16 +3327,16 @@ } }, "ResourceId": { - "target": "com.amazonaws.workmail#ResourceId", + "target": "com.amazonaws.workmail#EntityIdentifier", "traits": { - "smithy.api#documentation": "The identifier of the resource from which delegates' set members are removed.\n
", + "smithy.api#documentation": "The identifier of the resource from which delegates' set members are removed.\n
\nThe identifier can accept ResourceId, Resourcename, or email. The following identity formats are available:
\nResource ID: r-0123456789a0123456789b0123456789
\nEmail address: resource@domain.tld
\nResource name: resource
\nThe identifier for the member (user, group) to be removed from the resource's\n delegates.
", + "smithy.api#documentation": "The identifier for the member (user, group) to be removed from the resource's\n delegates.
\nThe entity ID can accept UserId or GroupID, Username or Groupname, or email.
\nEntity: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: entity@domain.tld
\nEntity: entity
\nThe identifier for the group from which members are removed.
", + "smithy.api#documentation": "The identifier for the group from which members are removed.
\nThe identifier can accept GroupId, Groupname, or email. The following identity formats are available:
\nGroup ID: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: group@domain.tld
\nGroup name: group
\nThe identifier for the member to be removed to the group.
", + "smithy.api#documentation": "The identifier for the member to be removed from the group.
\nThe member ID can accept UserID or GroupId, Username or Groupname, or email.
\nMember ID: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: member@domain.tld
\nMember name: member
\nThe fully qualified domain name.
" + "smithy.api#documentation": "The fully qualified domain name.
", + "smithy.api#required": {} } }, "HostedZoneId": { @@ -3243,7 +3510,7 @@ "min": 3, "max": 255 }, - "smithy.api#pattern": "^[a-zA-Z0-9.-]+\\.[a-zA-Z-]{2,}$" + "smithy.api#pattern": "^[a-zA-Z0-9.-]+$" } }, "com.amazonaws.workmail#Domains": { @@ -3349,6 +3616,29 @@ "smithy.api#error": "client" } }, + "com.amazonaws.workmail#EntityType": { + "type": "enum", + "members": { + "GROUP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GROUP" + } + }, + "USER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "USER" + } + }, + "RESOURCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RESOURCE" + } + } + } + }, "com.amazonaws.workmail#EwsAvailabilityProvider": { "type": "structure", "members": { @@ -3910,6 +4200,9 @@ { "target": "com.amazonaws.workmail#EntityNotFoundException" }, + { + "target": "com.amazonaws.workmail#InvalidParameterException" + }, { "target": "com.amazonaws.workmail#OrganizationNotFoundException" }, @@ -3933,9 +4226,9 @@ } }, "UserId": { - "target": "com.amazonaws.workmail#WorkMailIdentifier", + "target": "com.amazonaws.workmail#EntityIdentifier", "traits": { - "smithy.api#documentation": "The identifier for the user whose mailbox details are being requested.
", + "smithy.api#documentation": "The identifier for the user whose mailbox details are being requested.
\nThe identifier can be the UserId, Username, or email. The following identity formats are available:
\nUser ID: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: user@domain.tld
\nUser name: user
\nThe representation of an WorkMail group.
" } }, - "com.amazonaws.workmail#GroupName": { - "type": "string", - "traits": { - "smithy.api#length": { + "com.amazonaws.workmail#GroupIdentifier": { + "type": "structure", + "members": { + "GroupId": { + "target": "com.amazonaws.workmail#WorkMailIdentifier", + "traits": { + "smithy.api#documentation": "Group ID that matched the group.
" + } + }, + "GroupName": { + "target": "com.amazonaws.workmail#GroupName", + "traits": { + "smithy.api#documentation": "Group name that matched the group.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The identifier that contains the Group ID and name of a group.
" + } + }, + "com.amazonaws.workmail#GroupIdentifiers": { + "type": "list", + "member": { + "target": "com.amazonaws.workmail#GroupIdentifier" + } + }, + "com.amazonaws.workmail#GroupName": { + "type": "string", + "traits": { + "smithy.api#length": { "min": 1, "max": 256 }, @@ -4216,7 +4535,7 @@ "min": 1, "max": 32 }, - "smithy.api#pattern": "^[\\S\\s]*$" + "smithy.api#pattern": "^[^/\\\\]*$" } }, "com.amazonaws.workmail#IdempotencyClientToken": { @@ -4858,9 +5177,9 @@ } }, "GroupId": { - "target": "com.amazonaws.workmail#WorkMailIdentifier", + "target": "com.amazonaws.workmail#EntityIdentifier", "traits": { - "smithy.api#documentation": "The identifier for the group to which the members (users or groups) are\n associated.
", + "smithy.api#documentation": "The identifier for the group to which the members (users or groups) are\n associated.
\nThe identifier can accept GroupId, Groupname, or email. The following identity formats are available:
\nGroup ID: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: group@domain.tld
\nGroup name: group
\nFilters only groups with the provided name prefix.
" + } + }, + "PrimaryEmailPrefix": { + "target": "com.amazonaws.workmail#String", + "traits": { + "smithy.api#documentation": "Filters only groups with the provided primary email prefix.
" + } + }, + "State": { + "target": "com.amazonaws.workmail#EntityState", + "traits": { + "smithy.api#documentation": "Filters only groups with the provided state.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Filtering options for ListGroups operation. This is only used as input to Operation.
" + } + }, + "com.amazonaws.workmail#ListGroupsForEntity": { + "type": "operation", + "input": { + "target": "com.amazonaws.workmail#ListGroupsForEntityRequest" + }, + "output": { + "target": "com.amazonaws.workmail#ListGroupsForEntityResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workmail#EntityNotFoundException" + }, + { + "target": "com.amazonaws.workmail#EntityStateException" + }, + { + "target": "com.amazonaws.workmail#InvalidParameterException" + }, + { + "target": "com.amazonaws.workmail#OrganizationNotFoundException" + }, + { + "target": "com.amazonaws.workmail#OrganizationStateException" + } + ], + "traits": { + "smithy.api#documentation": "Returns all the groups to which an entity belongs.
", + "smithy.api#idempotent": {}, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.workmail#ListGroupsForEntityFilters": { + "type": "structure", + "members": { + "GroupNamePrefix": { + "target": "com.amazonaws.workmail#String", + "traits": { + "smithy.api#documentation": "Filters only group names that start with the provided name prefix.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Filtering options for ListGroupsForEntity operation. This is only used as input to Operation.
" + } + }, + "com.amazonaws.workmail#ListGroupsForEntityRequest": { + "type": "structure", + "members": { + "OrganizationId": { + "target": "com.amazonaws.workmail#OrganizationId", + "traits": { + "smithy.api#documentation": "The identifier for the organization under which the entity exists.
", + "smithy.api#required": {} + } + }, + "EntityId": { + "target": "com.amazonaws.workmail#EntityIdentifier", + "traits": { + "smithy.api#documentation": "The identifier for the entity.
\nThe entity ID can accept UserId or GroupID, Username or Groupname, or email.
\nEntity ID: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: entity@domain.tld
\nEntity name: entity
\nLimit the search results based on the filter criteria.
" + } + }, + "NextToken": { + "target": "com.amazonaws.workmail#NextToken", + "traits": { + "smithy.api#documentation": "The token to use to retrieve the next page of results. The first call does not contain any tokens.
" + } + }, + "MaxResults": { + "target": "com.amazonaws.workmail#MaxResults", + "traits": { + "smithy.api#documentation": "The maximum number of results to return in a single call.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.workmail#ListGroupsForEntityResponse": { + "type": "structure", + "members": { + "Groups": { + "target": "com.amazonaws.workmail#GroupIdentifiers", + "traits": { + "smithy.api#documentation": "The overview of groups in an organization.
" + } + }, + "NextToken": { + "target": "com.amazonaws.workmail#NextToken", + "traits": { + "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is `null` when there are no more results to return.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.workmail#ListGroupsRequest": { "type": "structure", "members": { @@ -4954,6 +5408,12 @@ "traits": { "smithy.api#documentation": "The maximum number of results to return in a single call.
" } + }, + "Filters": { + "target": "com.amazonaws.workmail#ListGroupsFilters", + "traits": { + "smithy.api#documentation": "Limit the search results based on the filter criteria. Only one filter per request is supported.
" + } } }, "traits": { @@ -5250,9 +5710,9 @@ } }, "EntityId": { - "target": "com.amazonaws.workmail#WorkMailIdentifier", + "target": "com.amazonaws.workmail#EntityIdentifier", "traits": { - "smithy.api#documentation": "The identifier of the user, group, or resource for which to list mailbox\n permissions.
", + "smithy.api#documentation": "The identifier of the user, or resource for which to list mailbox\n permissions.
\nThe entity ID can accept UserId or ResourceId, Username or Resourcename, or email.
\nEntity ID: 12345678-1234-1234-1234-123456789012, or r-0123456789a0123456789b0123456789
\nEmail address: entity@domain.tld
\nEntity name: entity
\nThe identifier for the resource whose delegates are listed.
", + "smithy.api#documentation": "The identifier for the resource whose delegates are listed.
\nThe identifier can accept ResourceId, Resourcename, or email. The following identity formats are available:
\nResource ID: r-0123456789a0123456789b0123456789
\nEmail address: resource@domain.tld
\nResource name: resource
\nFilters only resource that start with the entered name prefix .
" + } + }, + "PrimaryEmailPrefix": { + "target": "com.amazonaws.workmail#String", + "traits": { + "smithy.api#documentation": "Filters only resource with the provided primary email prefix.
" + } + }, + "State": { + "target": "com.amazonaws.workmail#EntityState", + "traits": { + "smithy.api#documentation": "Filters only resource with the provided state.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Filtering options for ListResources operation. This is only used as input to Operation.
" + } + }, "com.amazonaws.workmail#ListResourcesRequest": { "type": "structure", "members": { @@ -5637,6 +6129,12 @@ "traits": { "smithy.api#documentation": "The maximum number of results to return in a single call.
" } + }, + "Filters": { + "target": "com.amazonaws.workmail#ListResourcesFilters", + "traits": { + "smithy.api#documentation": "Limit the resource search results based on the filter criteria. You can only use one filter per request.
" + } } }, "traits": { @@ -5738,6 +6236,38 @@ } } }, + "com.amazonaws.workmail#ListUsersFilters": { + "type": "structure", + "members": { + "UsernamePrefix": { + "target": "com.amazonaws.workmail#String", + "traits": { + "smithy.api#documentation": "Filters only users with the provided username prefix.
" + } + }, + "DisplayNamePrefix": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Filters only users with the provided display name prefix.
" + } + }, + "PrimaryEmailPrefix": { + "target": "com.amazonaws.workmail#String", + "traits": { + "smithy.api#documentation": "Filters only users with the provided email prefix.
" + } + }, + "State": { + "target": "com.amazonaws.workmail#EntityState", + "traits": { + "smithy.api#documentation": "Filters only users with the provided state.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Filtering options for ListUsers operation. This is only used as input to Operation.
" + } + }, "com.amazonaws.workmail#ListUsersRequest": { "type": "structure", "members": { @@ -5759,6 +6289,12 @@ "traits": { "smithy.api#documentation": "The maximum number of results to return in a single call.
" } + }, + "Filters": { + "target": "com.amazonaws.workmail#ListUsersFilters", + "traits": { + "smithy.api#documentation": "Limit the user search results based on the filter criteria. You can only use one filter per request.
" + } } }, "traits": { @@ -6308,6 +6844,15 @@ "smithy.api#error": "client" } }, + "com.amazonaws.workmail#NewResourceDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 64 + } + } + }, "com.amazonaws.workmail#NextToken": { "type": "string", "traits": { @@ -6779,16 +7324,16 @@ } }, "EntityId": { - "target": "com.amazonaws.workmail#WorkMailIdentifier", + "target": "com.amazonaws.workmail#EntityIdentifier", "traits": { - "smithy.api#documentation": "The identifier of the user, group, or resource for which to update mailbox\n permissions.
", + "smithy.api#documentation": "The identifier of the user or resource for which to update mailbox\n permissions.
\nThe identifier can be UserId, ResourceID, or Group Id, Username, Resourcename, or Groupname, or email.
\nEntity ID: 12345678-1234-1234-1234-123456789012, r-0123456789a0123456789b0123456789, or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: entity@domain.tld
\nEntity name: entity
\nThe identifier of the user, group, or resource to which to grant the\n permissions.
", + "smithy.api#documentation": "The identifier of the user, group, or resource to which to grant the\n permissions.
\nThe identifier can be UserId, ResourceID, or Group Id, Username, Resourcename, or Groupname, or email.
\nGrantee ID: 12345678-1234-1234-1234-123456789012, r-0123456789a0123456789b0123456789, or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: grantee@domain.tld
\nGrantee name: grantee
\nThe identifier for the user, group, or resource to be updated.
", + "smithy.api#documentation": "The identifier for the user, group, or resource to be updated.
\nThe identifier can accept UserId, ResourceId, or GroupId, or Username, Resourcename, or Groupname. The following identity formats are available:
\nEntity ID: 12345678-1234-1234-1234-123456789012, r-0123456789a0123456789b0123456789, or S-1-1-12-1234567890-123456789-123456789-1234
\nEntity name: entity
\nThe date indicating when the resource was disabled from WorkMail use.
" } + }, + "Description": { + "target": "com.amazonaws.workmail#ResourceDescription", + "traits": { + "smithy.api#documentation": "Resource description.
" + } } }, "traits": { @@ -7280,6 +7831,15 @@ "target": "com.amazonaws.workmail#Delegate" } }, + "com.amazonaws.workmail#ResourceDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + } + } + }, "com.amazonaws.workmail#ResourceId": { "type": "string", "traits": { @@ -7374,7 +7934,8 @@ "smithy.api#length": { "min": 20, "max": 2048 - } + }, + "smithy.api#pattern": "^arn:aws:iam:[a-z0-9-]*:[a-z0-9-]+:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$" } }, "com.amazonaws.workmail#S3BucketName": { @@ -7456,9 +8017,9 @@ } }, "EntityId": { - "target": "com.amazonaws.workmail#WorkMailIdentifier", + "target": "com.amazonaws.workmail#EntityIdentifier", "traits": { - "smithy.api#documentation": "The identifier of the user or resource associated with the mailbox.
", + "smithy.api#documentation": "The identifier of the user or resource associated with the mailbox.
\nThe identifier can accept UserId or ResourceId, Username or Resourcename, or email. The following identity formats are available:
\nEntity ID: 12345678-1234-1234-1234-123456789012, r-0123456789a0123456789b0123456789\n , or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: entity@domain.tld
\nEntity name: entity
\nUpdates attibutes in a group.
", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workmail#UpdateGroupRequest": { + "type": "structure", + "members": { + "OrganizationId": { + "target": "com.amazonaws.workmail#OrganizationId", + "traits": { + "smithy.api#documentation": "The identifier for the organization under which the group exists.
", + "smithy.api#required": {} + } + }, + "GroupId": { + "target": "com.amazonaws.workmail#EntityIdentifier", + "traits": { + "smithy.api#documentation": "The identifier for the group to be updated.
\nThe identifier can accept GroupId, Groupname, or email. The following identity formats are available:
\nGroup ID: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: group@domain.tld
\nGroup name: group
\nIf enabled, the group is hidden from the global address list.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.workmail#UpdateGroupResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.workmail#UpdateImpersonationRole": { "type": "operation", "input": { @@ -8059,9 +8691,9 @@ } }, "UserId": { - "target": "com.amazonaws.workmail#WorkMailIdentifier", + "target": "com.amazonaws.workmail#EntityIdentifier", "traits": { - "smithy.api#documentation": "The identifer for the user for whom to update the mailbox quota.
",
+          "smithy.api#documentation": "<p>The identifier for the user for whom to update the mailbox quota.</p>
\nThe identifier can be the UserId, Username, or email. The following identity formats are available:
\nUser ID: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: user@domain.tld
\nUser name: user
\nThe user, group, or resource to update.
", + "smithy.api#documentation": "The user, group, or resource to update.
\nThe identifier can accept UseriD, ResourceId, or GroupId, Username, Resourcename, or Groupname, or email. The following identity formats are available:
\nEntity ID: 12345678-1234-1234-1234-123456789012, r-0123456789a0123456789b0123456789, or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: entity@domain.tld
\nEntity name: entity
\nThe identifier of the resource to be updated.
", + "smithy.api#documentation": "The identifier of the resource to be updated.
\nThe identifier can accept ResourceId, Resourcename, or email. The following identity formats are available:
\nResource ID: r-0123456789a0123456789b0123456789
\nEmail address: resource@domain.tld
\nResource name: resource
\nThe resource's booking options to be updated.
" } + }, + "Description": { + "target": "com.amazonaws.workmail#NewResourceDescription", + "traits": { + "smithy.api#documentation": "Updates the resource description.
" + } + }, + "Type": { + "target": "com.amazonaws.workmail#ResourceType", + "traits": { + "smithy.api#documentation": "Updates the resource type.
" + } + }, + "HiddenFromGlobalAddressList": { + "target": "com.amazonaws.workmail#BooleanObject", + "traits": { + "smithy.api#documentation": "If enabled, the resource is hidden from the global address list.
" + } } }, "traits": { @@ -8377,6 +9033,164 @@ "smithy.api#output": {} } }, + "com.amazonaws.workmail#UpdateUser": { + "type": "operation", + "input": { + "target": "com.amazonaws.workmail#UpdateUserRequest" + }, + "output": { + "target": "com.amazonaws.workmail#UpdateUserResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workmail#DirectoryServiceAuthenticationFailedException" + }, + { + "target": "com.amazonaws.workmail#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.workmail#EntityNotFoundException" + }, + { + "target": "com.amazonaws.workmail#EntityStateException" + }, + { + "target": "com.amazonaws.workmail#InvalidParameterException" + }, + { + "target": "com.amazonaws.workmail#OrganizationNotFoundException" + }, + { + "target": "com.amazonaws.workmail#OrganizationStateException" + }, + { + "target": "com.amazonaws.workmail#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "Updates data for the user. To have the latest information, it must be preceded by a\n DescribeUser call. The dataset in the request should be the one\n expected when performing another DescribeUser
call.
The identifier for the organization under which the user exists.
", + "smithy.api#required": {} + } + }, + "UserId": { + "target": "com.amazonaws.workmail#EntityIdentifier", + "traits": { + "smithy.api#documentation": "The identifier for the user to be updated.
\nThe identifier can be the UserId, Username, or email. The following identity formats are available:
\nUser ID: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234
\nEmail address: user@domain.tld
\nUser name: user
\nUpdates the user role.
\nYou cannot pass SYSTEM_USER or RESOURCE.
" + } + }, + "DisplayName": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Updates the display name of the user.
" + } + }, + "FirstName": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Updates the user's first name.
" + } + }, + "LastName": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Updates the user's last name.
" + } + }, + "HiddenFromGlobalAddressList": { + "target": "com.amazonaws.workmail#BooleanObject", + "traits": { + "smithy.api#documentation": "If enabled, the user is hidden from the global address list.
" + } + }, + "Initials": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Updates the user's initials.
" + } + }, + "Telephone": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Updates the user's contact details.
" + } + }, + "Street": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Updates the user's street address.
" + } + }, + "JobTitle": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Updates the user's job title.
" + } + }, + "City": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Updates the user's city.
" + } + }, + "Company": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Updates the user's company.
" + } + }, + "ZipCode": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Updates the user's zipcode.
" + } + }, + "Department": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Updates the user's department.
" + } + }, + "Country": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Updates the user's country.
" + } + }, + "Office": { + "target": "com.amazonaws.workmail#UserAttribute", + "traits": { + "smithy.api#documentation": "Updates the user's office.
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.workmail#UpdateUserResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.workmail#Url": { "type": "string", "traits": { @@ -8443,6 +9257,16 @@ "smithy.api#documentation": "The representation of an WorkMail user.
" } }, + "com.amazonaws.workmail#UserAttribute": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#sensitive": {} + } + }, "com.amazonaws.workmail#UserIdList": { "type": "list", "member": { @@ -8485,6 +9309,12 @@ "traits": { "smithy.api#enumValue": "SYSTEM_USER" } + }, + "REMOTE_USER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REMOTE_USER" + } } } }, @@ -8501,7 +9331,7 @@ "min": 3, "max": 209 }, - "smithy.api#pattern": "^[a-zA-Z0-9.-]+\\.[a-zA-Z-]{2,}$" + "smithy.api#pattern": "^[a-zA-Z0-9.-]+$" } }, "com.amazonaws.workmail#WorkMailIdentifier": { @@ -8601,6 +9431,9 @@ { "target": "com.amazonaws.workmail#DescribeEmailMonitoringConfiguration" }, + { + "target": "com.amazonaws.workmail#DescribeEntity" + }, { "target": "com.amazonaws.workmail#DescribeGroup" }, @@ -8664,6 +9497,9 @@ { "target": "com.amazonaws.workmail#ListGroups" }, + { + "target": "com.amazonaws.workmail#ListGroupsForEntity" + }, { "target": "com.amazonaws.workmail#ListImpersonationRoles" }, @@ -8742,6 +9578,9 @@ { "target": "com.amazonaws.workmail#UpdateDefaultMailDomain" }, + { + "target": "com.amazonaws.workmail#UpdateGroup" + }, { "target": "com.amazonaws.workmail#UpdateImpersonationRole" }, @@ -8756,6 +9595,9 @@ }, { "target": "com.amazonaws.workmail#UpdateResource" + }, + { + "target": "com.amazonaws.workmail#UpdateUser" } ], "traits": { diff --git a/gradle.properties b/gradle.properties index 9e402be5fee..7a2bb719590 100644 --- a/gradle.properties +++ b/gradle.properties @@ -12,7 +12,7 @@ sdkVersion=0.32.3-SNAPSHOT smithyVersion=1.39.0 smithyGradleVersion=0.6.0 # smithy-kotlin codegen and runtime are versioned together -smithyKotlinVersion=0.27.5-SNAPSHOT +smithyKotlinVersion=0.27.5 # kotlin kotlinVersion=1.8.22