diff --git a/CHANGELOG.md b/CHANGELOG.md index 88225165293..8904dcb2b12 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,12 @@ +Release v1.41.17 (2021-11-03) +=== + +### Service Client Updates +* `service/connectparticipant`: Updates service API +* `service/datasync`: Updates service API and documentation +* `service/finspace`: Updates service API and documentation +* `service/macie2`: Updates service API and documentation + Release v1.41.16 (2021-11-02) === diff --git a/aws/version.go b/aws/version.go index af55d73ce21..1cfa43cb3a0 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.41.16" +const SDKVersion = "1.41.17" diff --git a/models/apis/connectparticipant/2018-09-07/api-2.json b/models/apis/connectparticipant/2018-09-07/api-2.json index 68e1fb51c3a..8b79633fae6 100644 --- a/models/apis/connectparticipant/2018-09-07/api-2.json +++ b/models/apis/connectparticipant/2018-09-07/api-2.json @@ -216,11 +216,7 @@ "MESSAGE", "EVENT", "ATTACHMENT", - "CONNECTION_ACK", - "PARTICIPANT_ACTIVE", - "PARTICIPANT_INACTIVE", - "PARTICIPANT_ENGAGED", - "PARTICIPANT_DISENGAGED" + "CONNECTION_ACK" ] }, "ClientToken":{ diff --git a/models/apis/datasync/2018-11-09/api-2.json b/models/apis/datasync/2018-11-09/api-2.json index d1990284bfe..70e4e313ee1 100644 --- a/models/apis/datasync/2018-11-09/api-2.json +++ b/models/apis/datasync/2018-11-09/api-2.json @@ -66,6 +66,19 @@ {"shape":"InternalException"} ] }, + "CreateLocationHdfs":{ + "name":"CreateLocationHdfs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateLocationHdfsRequest"}, + "output":{"shape":"CreateLocationHdfsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ] + }, "CreateLocationNfs":{ "name":"CreateLocationNfs", "http":{ @@ -209,6 +222,19 @@ {"shape":"InternalException"} ] }, + "DescribeLocationHdfs":{ + "name":"DescribeLocationHdfs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeLocationHdfsRequest"}, + "output":{"shape":"DescribeLocationHdfsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ] + }, "DescribeLocationNfs":{ "name":"DescribeLocationNfs", "http":{ @@ -404,6 +430,19 @@ {"shape":"InternalException"} ] }, + "UpdateLocationHdfs":{ + "name":"UpdateLocationHdfs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLocationHdfsRequest"}, + "output":{"shape":"UpdateLocationHdfsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ] + }, "UpdateLocationNfs":{ "name":"UpdateLocationNfs", "http":{ @@ -590,6 +629,35 @@ "LocationArn":{"shape":"LocationArn"} } }, + "CreateLocationHdfsRequest":{ + "type":"structure", + "required":[ + "NameNodes", + "AuthenticationType", + "AgentArns" + ], + "members":{ + "Subdirectory":{"shape":"HdfsSubdirectory"}, + "NameNodes":{"shape":"HdfsNameNodeList"}, + "BlockSize":{"shape":"HdfsBlockSize"}, + "ReplicationFactor":{"shape":"HdfsReplicationFactor"}, + "KmsKeyProviderUri":{"shape":"KmsKeyProviderUri"}, + "QopConfiguration":{"shape":"QopConfiguration"}, + "AuthenticationType":{"shape":"HdfsAuthenticationType"}, + "SimpleUser":{"shape":"HdfsUser"}, + "KerberosPrincipal":{"shape":"KerberosPrincipal"}, + "KerberosKeytab":{"shape":"KerberosKeytabFile"}, + "KerberosKrb5Conf":{"shape":"KerberosKrb5ConfFile"}, + "AgentArns":{"shape":"AgentArnList"}, + 
"Tags":{"shape":"InputTagList"} + } + }, + "CreateLocationHdfsResponse":{ + "type":"structure", + "members":{ + "LocationArn":{"shape":"LocationArn"} + } + }, "CreateLocationNfsRequest":{ "type":"structure", "required":[ @@ -796,6 +864,30 @@ "Domain":{"shape":"SmbDomain"} } }, + "DescribeLocationHdfsRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{"shape":"LocationArn"} + } + }, + "DescribeLocationHdfsResponse":{ + "type":"structure", + "members":{ + "LocationArn":{"shape":"LocationArn"}, + "LocationUri":{"shape":"LocationUri"}, + "NameNodes":{"shape":"HdfsNameNodeList"}, + "BlockSize":{"shape":"HdfsBlockSize"}, + "ReplicationFactor":{"shape":"HdfsReplicationFactor"}, + "KmsKeyProviderUri":{"shape":"KmsKeyProviderUri"}, + "QopConfiguration":{"shape":"QopConfiguration"}, + "AuthenticationType":{"shape":"HdfsAuthenticationType"}, + "SimpleUser":{"shape":"HdfsUser"}, + "KerberosPrincipal":{"shape":"KerberosPrincipal"}, + "AgentArns":{"shape":"AgentArnList"}, + "CreationTime":{"shape":"Time"} + } + }, "DescribeLocationNfsRequest":{ "type":"structure", "required":["LocationArn"], @@ -1033,6 +1125,82 @@ "BOTH" ] }, + "HdfsAuthenticationType":{ + "type":"string", + "enum":[ + "SIMPLE", + "KERBEROS" + ] + }, + "HdfsBlockSize":{ + "type":"integer", + "box":true, + "max":1073741824, + "min":1048576 + }, + "HdfsDataTransferProtection":{ + "type":"string", + "enum":[ + "DISABLED", + "AUTHENTICATION", + "INTEGRITY", + "PRIVACY" + ] + }, + "HdfsNameNode":{ + "type":"structure", + "required":[ + "Hostname", + "Port" + ], + "members":{ + "Hostname":{"shape":"HdfsServerHostname"}, + "Port":{"shape":"HdfsServerPort"} + } + }, + "HdfsNameNodeList":{ + "type":"list", + "member":{"shape":"HdfsNameNode"}, + "min":1 + }, + "HdfsReplicationFactor":{ + "type":"integer", + "box":true, + "max":512, + "min":1 + }, + "HdfsRpcProtection":{ + "type":"string", + "enum":[ + "DISABLED", + "AUTHENTICATION", + "INTEGRITY", + "PRIVACY" + ] + }, + "HdfsServerHostname":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^(([a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9\\-]*[A-Za-z0-9])$" + }, + "HdfsServerPort":{ + "type":"integer", + "box":true, + "max":65536, + "min":1 + }, + "HdfsSubdirectory":{ + "type":"string", + "max":4096, + "pattern":"^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\$\\p{Zs}]+$" + }, + "HdfsUser":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[_.A-Za-z0-9][-_.A-Za-z0-9]*$" + }, "IamRoleArn":{ "type":"string", "max":2048, @@ -1061,6 +1229,26 @@ }, "exception":true }, + "KerberosKeytabFile":{ + "type":"blob", + "max":65536 + }, + "KerberosKrb5ConfFile":{ + "type":"blob", + "max":131072 + }, + "KerberosPrincipal":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^.+$" + }, + "KmsKeyProviderUri":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^kms:\\/\\/http[s]?@(([a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9\\-]*[A-Za-z0-9])(;(([a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9\\-]*[A-Za-z0-9]))*:[0-9]{1,5}\\/kms$" + }, "ListAgentsRequest":{ "type":"structure", "members":{ @@ -1373,6 +1561,13 @@ "SecurityGroupArns":{"shape":"PLSecurityGroupArnList"} } }, + "QopConfiguration":{ + "type":"structure", + "members":{ + "RpcProtection":{"shape":"HdfsRpcProtection"}, + "DataTransferProtection":{"shape":"HdfsDataTransferProtection"} + } + }, "S3BucketArn":{ "type":"string", "max":156, @@ -1675,6 +1870,30 @@ "members":{ } }, + "UpdateLocationHdfsRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{"shape":"LocationArn"}, + 
"Subdirectory":{"shape":"HdfsSubdirectory"}, + "NameNodes":{"shape":"HdfsNameNodeList"}, + "BlockSize":{"shape":"HdfsBlockSize"}, + "ReplicationFactor":{"shape":"HdfsReplicationFactor"}, + "KmsKeyProviderUri":{"shape":"KmsKeyProviderUri"}, + "QopConfiguration":{"shape":"QopConfiguration"}, + "AuthenticationType":{"shape":"HdfsAuthenticationType"}, + "SimpleUser":{"shape":"HdfsUser"}, + "KerberosPrincipal":{"shape":"KerberosPrincipal"}, + "KerberosKeytab":{"shape":"KerberosKeytabFile"}, + "KerberosKrb5Conf":{"shape":"KerberosKrb5ConfFile"}, + "AgentArns":{"shape":"AgentArnList"} + } + }, + "UpdateLocationHdfsResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateLocationNfsRequest":{ "type":"structure", "required":["LocationArn"], diff --git a/models/apis/datasync/2018-11-09/docs-2.json b/models/apis/datasync/2018-11-09/docs-2.json index 5de30f78263..1b969a43700 100644 --- a/models/apis/datasync/2018-11-09/docs-2.json +++ b/models/apis/datasync/2018-11-09/docs-2.json @@ -6,6 +6,7 @@ "CreateAgent": "

Activates a DataSync agent that you have deployed on your host. The activation process associates your agent with your account. In the activation process, you specify information such as the Amazon Web Services Region that you want to activate the agent in. You activate the agent in the Amazon Web Services Region where your target locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created in this Amazon Web Services Region.

You can activate the agent in a VPC (virtual private cloud) or provide the agent access to a VPC endpoint so you can run tasks without going over the public internet.

You can use an agent for more than one location. If a task uses multiple agents, all of them must have the status AVAILABLE for the task to run.

Agents are automatically updated by Amazon Web Services on a regular basis, using a mechanism that ensures minimal interruption to your tasks.

", "CreateLocationEfs": "

Creates an endpoint for an Amazon EFS file system.

", "CreateLocationFsxWindows": "

Creates an endpoint for an Amazon FSx for Windows File Server file system.

", + "CreateLocationHdfs": "

Creates an endpoint for a Hadoop Distributed File System (HDFS).

", "CreateLocationNfs": "

Defines a file system on a Network File System (NFS) server that can be read from or written to.

", "CreateLocationObjectStorage": "

Creates an endpoint for a self-managed object storage bucket. For more information about self-managed object storage locations, see Creating a location for object storage.

", "CreateLocationS3": "

Creates an endpoint for an Amazon S3 bucket.

For more information, see Create an Amazon S3 location in the DataSync User Guide.

", @@ -17,6 +18,7 @@ "DescribeAgent": "

Returns metadata such as the name, the network interfaces, and the status (that is, whether the agent is running or not) for an agent. To specify which agent to describe, use the Amazon Resource Name (ARN) of the agent in your request.

", "DescribeLocationEfs": "

Returns metadata, such as the path information, about an Amazon EFS location.

", "DescribeLocationFsxWindows": "

Returns metadata, such as the path information, about an Amazon FSx for Windows File Server location.

", + "DescribeLocationHdfs": "

Returns metadata, such as the authentication information, about the Hadoop Distributed File System (HDFS) location.

", "DescribeLocationNfs": "

Returns metadata, such as the path information, about an NFS location.

", "DescribeLocationObjectStorage": "

Returns metadata about a self-managed object storage server location. For more information about self-managed object storage locations, see Creating a location for object storage.

", "DescribeLocationS3": "

Returns metadata, such as bucket name, about an Amazon S3 bucket location.

", @@ -32,6 +34,7 @@ "TagResource": "

Applies a key-value pair to an Amazon Web Services resource.

", "UntagResource": "

Removes a tag from an Amazon Web Services resource.

", "UpdateAgent": "

Updates the name of an agent.

", + "UpdateLocationHdfs": "

Updates some parameters of a previously created location for a Hadoop Distributed File System cluster.

", "UpdateLocationNfs": "

Updates some of the parameters of a previously created location for Network File System (NFS) access. For information about creating an NFS location, see Creating a location for NFS.

", "UpdateLocationObjectStorage": "

Updates some of the parameters of a previously created location for self-managed object storage server access. For information about creating a self-managed object storage location, see Creating a location for object storage.

", "UpdateLocationSmb": "

Updates some of the parameters of a previously created location for Server Message Block (SMB) file system access. For information about creating an SMB location, see Creating a location for SMB.

", @@ -60,13 +63,16 @@ "AgentArnList": { "base": null, "refs": { + "CreateLocationHdfsRequest$AgentArns": "

The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster.

", "CreateLocationObjectStorageRequest$AgentArns": "

The Amazon Resource Names (ARNs) of the agents associated with the self-managed object storage server location.

", "CreateLocationS3Request$AgentArns": "

If you are using DataSync on an Amazon Web Services Outpost, specify the Amazon Resource Names (ARNs) of the DataSync agents deployed on your Outpost. For more information about launching a DataSync agent on an Amazon Web Services Outpost, see Deploy your DataSync agent on Outposts.

", "CreateLocationSmbRequest$AgentArns": "

The Amazon Resource Names (ARNs) of agents to use for a Server Message Block (SMB) location.

", + "DescribeLocationHdfsResponse$AgentArns": "

The ARNs of the agents that are used to connect to the HDFS cluster.

", "DescribeLocationObjectStorageResponse$AgentArns": "

The Amazon Resource Names (ARNs) of the agents associated with the self-managed object storage server location.

", "DescribeLocationS3Response$AgentArns": "

If you are using DataSync on an Amazon Web Services Outpost, the Amazon Resource Names (ARNs) of the EC2 agents deployed on your Outpost. For more information about launching a DataSync agent on an Amazon Web Services Outpost, see Deploy your DataSync agent on Outposts.

", "DescribeLocationSmbResponse$AgentArns": "

The Amazon Resource Names (ARNs) of the agents that are used to connect to the SMB file system location.

", "OnPremConfig$AgentArns": "

ARNs of the agents to use for an NFS location.

", + "UpdateLocationHdfsRequest$AgentArns": "

The ARNs of the agents that are used to connect to the HDFS cluster.

", "UpdateLocationObjectStorageRequest$AgentArns": "

The Amazon Resource Names (ARNs) of the agents associated with the self-managed object storage server location.

", "UpdateLocationSmbRequest$AgentArns": "

The Amazon Resource Names (ARNs) of agents to use for a Server Message Block (SMB) location.

" } @@ -142,6 +148,16 @@ "refs": { } }, + "CreateLocationHdfsRequest": { + "base": null, + "refs": { + } + }, + "CreateLocationHdfsResponse": { + "base": null, + "refs": { + } + }, "CreateLocationNfsRequest": { "base": "

CreateLocationNfsRequest

", "refs": { @@ -252,6 +268,16 @@ "refs": { } }, + "DescribeLocationHdfsRequest": { + "base": null, + "refs": { + } + }, + "DescribeLocationHdfsResponse": { + "base": null, + "refs": { + } + }, "DescribeLocationNfsRequest": { "base": "

DescribeLocationNfsRequest

", "refs": { @@ -315,7 +341,7 @@ "DestinationNetworkInterfaceArns": { "base": null, "refs": { - "DescribeTaskResponse$DestinationNetworkInterfaceArns": "

The Amazon Resource Name (ARN) of the destination ENIs (Elastic Network Interface) that was created for your subnet.

" + "DescribeTaskResponse$DestinationNetworkInterfaceArns": "

The Amazon Resource Names (ARNs) of the destination elastic network interfaces (ENIs) that were created for your subnet.

" } }, "Duration": { @@ -390,15 +416,15 @@ "base": null, "refs": { "CreateTaskRequest$Excludes": "

A list of filter rules that determines which files to exclude from a task. The list should contain a single filter string that consists of the patterns to exclude. The patterns are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\".

", - "CreateTaskRequest$Includes": "

A list of filter rules that determines which files to include when running a task. The pattern should contain a single filter string that consists of the patterns to include. The patterns are delimited by \"|\" (that is, a pipe). For example: \"/folder1|/folder2\"

", + "CreateTaskRequest$Includes": "

A list of filter rules that determines which files to include when running a task. The pattern contains a single filter string that consists of the patterns to include. The patterns are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\".

", "DescribeTaskExecutionResponse$Excludes": "

A list of filter rules that determines which files to exclude from a task. The list should contain a single filter string that consists of the patterns to exclude. The patterns are delimited by \"|\" (that is, a pipe), for example: \"/folder1|/folder2\"

", "DescribeTaskExecutionResponse$Includes": "

A list of filter rules that determines which files to include when running a task. The list should contain a single filter string that consists of the patterns to include. The patterns are delimited by \"|\" (that is, a pipe), for example: \"/folder1|/folder2\"

", - "DescribeTaskResponse$Excludes": "

A list of filter rules that determines which files to exclude from a task. The list should contain a single filter string that consists of the patterns to exclude. The patterns are delimited by \"|\" (that is, a pipe), for example: \"/folder1|/folder2\"

", - "DescribeTaskResponse$Includes": "

A list of filter rules that determines which files to include when running a task. The pattern should contain a single filter string that consists of the patterns to include. The patterns are delimited by \"|\" (that is, a pipe). For example: \"/folder1|/folder2\"

", - "StartTaskExecutionRequest$Includes": "

A list of filter rules that determines which files to include when running a task. The pattern should contain a single filter string that consists of the patterns to include. The patterns are delimited by \"|\" (that is, a pipe). For example: \"/folder1|/folder2\"

", - "StartTaskExecutionRequest$Excludes": "

A list of filter rules that determines which files to exclude from a task. The list should contain a single filter string that consists of the patterns to exclude. The patterns are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\".

", - "UpdateTaskRequest$Excludes": "

A list of filter rules that determines which files to exclude from a task. The list should contain a single filter string that consists of the patterns to exclude. The patterns are delimited by \"|\" (that is, a pipe), for example: \"/folder1|/folder2\"

", - "UpdateTaskRequest$Includes": "

A list of filter rules that determines which files to include when running a task. The pattern should contain a single filter string that consists of the patterns to include. The patterns are delimited by \"|\" (that is, a pipe). For example: \"/folder1|/folder2\"

" + "DescribeTaskResponse$Excludes": "

A list of filter rules that determines which files to exclude from a task. The list should contain a single filter string that consists of the patterns to exclude. The patterns are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\".

", + "DescribeTaskResponse$Includes": "

A list of filter rules that determines which files to include when running a task. The pattern contains a single filter string that consists of the patterns to include. The patterns are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\".

", + "StartTaskExecutionRequest$Includes": "

A list of filter rules that determines which files to include when running a task. The pattern should contain a single filter string that consists of the patterns to include. The patterns are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\".

", + "StartTaskExecutionRequest$Excludes": "

A list of filter rules that determines which files to exclude from a task. The list contains a single filter string that consists of the patterns to exclude. The patterns are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\".

", + "UpdateTaskRequest$Excludes": "

A list of filter rules that determines which files to exclude from a task. The list should contain a single filter string that consists of the patterns to exclude. The patterns are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\".

", + "UpdateTaskRequest$Includes": "

A list of filter rules that determines which files to include when running a task. The pattern contains a single filter string that consists of the patterns to include. The patterns are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\".

" } }, "FilterRule": { @@ -444,6 +470,83 @@ "Options$Gid": "

The POSIX group ID (GID) of the file's owners. This option should only be set for NFS, EFS, and S3 locations. For more information about what metadata is copied by DataSync, see Metadata Copied by DataSync.

Default value: INT_VALUE. This preserves the integer value of the ID.

INT_VALUE: Preserve the integer value of user ID (UID) and GID (recommended).

NONE: Ignore UID and GID.

" } }, + "HdfsAuthenticationType": { + "base": null, + "refs": { + "CreateLocationHdfsRequest$AuthenticationType": "

The type of authentication used to determine the identity of the user.

", + "DescribeLocationHdfsResponse$AuthenticationType": "

The type of authentication used to determine the identity of the user.

", + "UpdateLocationHdfsRequest$AuthenticationType": "

The type of authentication used to determine the identity of the user.

" + } + }, + "HdfsBlockSize": { + "base": null, + "refs": { + "CreateLocationHdfsRequest$BlockSize": "

The size of data blocks to write into the HDFS cluster. The block size must be a multiple of 512 bytes. The default block size is 128 mebibytes (MiB).

", + "DescribeLocationHdfsResponse$BlockSize": "

The size of the data blocks to write into the HDFS cluster.

", + "UpdateLocationHdfsRequest$BlockSize": "

The size of the data blocks to write into the HDFS cluster.

" + } + }, + "HdfsDataTransferProtection": { + "base": null, + "refs": { + "QopConfiguration$DataTransferProtection": "

The data transfer protection setting configured on the HDFS cluster. This setting corresponds to your dfs.data.transfer.protection setting in the hdfs-site.xml file on your Hadoop cluster.

" + } + }, + "HdfsNameNode": { + "base": "

The NameNode of the Hadoop Distributed File System (HDFS). The NameNode manages the file system's namespace. The NameNode performs operations such as opening, closing, and renaming files and directories. The NameNode contains the information to map blocks of data to the DataNodes.

", + "refs": { + "HdfsNameNodeList$member": null + } + }, + "HdfsNameNodeList": { + "base": null, + "refs": { + "CreateLocationHdfsRequest$NameNodes": "

The NameNode that manages the HDFS namespace. The NameNode performs operations such as opening, closing, and renaming files and directories. The NameNode contains the information to map blocks of data to the DataNodes. You can use only one NameNode.

", + "DescribeLocationHdfsResponse$NameNodes": "

The NameNode that manages the HDFS namespace.

", + "UpdateLocationHdfsRequest$NameNodes": "

The NameNode that manages the HDFS namespace. The NameNode performs operations such as opening, closing, and renaming files and directories. The NameNode contains the information to map blocks of data to the DataNodes. You can use only one NameNode.

" + } + }, + "HdfsReplicationFactor": { + "base": null, + "refs": { + "CreateLocationHdfsRequest$ReplicationFactor": "

The number of DataNodes to replicate the data to when writing to the HDFS cluster. By default, data is replicated to three DataNodes.

", + "DescribeLocationHdfsResponse$ReplicationFactor": "

The number of DataNodes to replicate the data to when writing to the HDFS cluster.

", + "UpdateLocationHdfsRequest$ReplicationFactor": "

The number of DataNodes to replicate the data to when writing to the HDFS cluster.

" + } + }, + "HdfsRpcProtection": { + "base": null, + "refs": { + "QopConfiguration$RpcProtection": "

The RPC protection setting configured on the HDFS cluster. This setting corresponds to your hadoop.rpc.protection setting in your core-site.xml file on your Hadoop cluster.

" + } + }, + "HdfsServerHostname": { + "base": null, + "refs": { + "HdfsNameNode$Hostname": "

The hostname of the NameNode in the HDFS cluster. This value is the IP address or Domain Name Service (DNS) name of the NameNode. An agent that's installed on-premises uses this hostname to communicate with the NameNode in the network.

" + } + }, + "HdfsServerPort": { + "base": null, + "refs": { + "HdfsNameNode$Port": "

The port that the NameNode uses to listen to client requests.

" + } + }, + "HdfsSubdirectory": { + "base": null, + "refs": { + "CreateLocationHdfsRequest$Subdirectory": "

A subdirectory in the HDFS cluster. This subdirectory is used to read data from or write data to the HDFS cluster. If the subdirectory isn't specified, it will default to /.

", + "UpdateLocationHdfsRequest$Subdirectory": "

A subdirectory in the HDFS cluster. This subdirectory is used to read data from or write data to the HDFS cluster.

" + } + }, + "HdfsUser": { + "base": null, + "refs": { + "CreateLocationHdfsRequest$SimpleUser": "

The user name used to identify the client on the host operating system.

If SIMPLE is specified for AuthenticationType, this parameter is required.

", + "DescribeLocationHdfsResponse$SimpleUser": "

The user name used to identify the client on the host operating system. This parameter is used if the AuthenticationType is defined as SIMPLE.

", + "UpdateLocationHdfsRequest$SimpleUser": "

The user name used to identify the client on the host operating system.

" + } + }, "IamRoleArn": { "base": null, "refs": { @@ -456,6 +559,7 @@ "CreateAgentRequest$Tags": "

The key-value pair that represents the tag that you want to associate with the agent. The value can be an empty string. This value helps you manage, filter, and search for your agents.

Valid characters for key and value are letters, spaces, and numbers representable in UTF-8 format, and the following special characters: + - = . _ : / @.

", "CreateLocationEfsRequest$Tags": "

The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location.

", "CreateLocationFsxWindowsRequest$Tags": "

The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location.

", + "CreateLocationHdfsRequest$Tags": "

The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.

", "CreateLocationNfsRequest$Tags": "

The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.

", "CreateLocationObjectStorageRequest$Tags": "

The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.

", "CreateLocationS3Request$Tags": "

The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.

", @@ -474,6 +578,36 @@ "refs": { } }, + "KerberosKeytabFile": { + "base": null, + "refs": { + "CreateLocationHdfsRequest$KerberosKeytab": "

The Kerberos key table (keytab) that contains mappings between the defined Kerberos principal and the encrypted keys. You can load the keytab from a file by providing the file's address. If you're using the CLI, it performs base64 encoding for you. Otherwise, provide the base64-encoded text.

If KERBEROS is specified for AuthenticationType, this parameter is required.

", + "UpdateLocationHdfsRequest$KerberosKeytab": "

The Kerberos key table (keytab) that contains mappings between the defined Kerberos principal and the encrypted keys. You can load the keytab from a file by providing the file's address. If you use the AWS CLI, it performs base64 encoding for you. Otherwise, provide the base64-encoded text.

" + } + }, + "KerberosKrb5ConfFile": { + "base": null, + "refs": { + "CreateLocationHdfsRequest$KerberosKrb5Conf": "

The krb5.conf file that contains the Kerberos configuration information. You can load the krb5.conf file by providing the file's address. If you're using the CLI, it performs the base64 encoding for you. Otherwise, provide the base64-encoded text.

If KERBEROS is specified for AuthenticationType, this parameter is required.

", + "UpdateLocationHdfsRequest$KerberosKrb5Conf": "

The krb5.conf file that contains the Kerberos configuration information. You can load the krb5.conf file by providing the file's address. If you're using the AWS CLI, it performs the base64 encoding for you. Otherwise, provide the base64-encoded text.

" + } + }, + "KerberosPrincipal": { + "base": null, + "refs": { + "CreateLocationHdfsRequest$KerberosPrincipal": "

The Kerberos principal with access to the files and folders on the HDFS cluster.

If KERBEROS is specified for AuthenticationType, this parameter is required.

", + "DescribeLocationHdfsResponse$KerberosPrincipal": "

The Kerberos principal with access to the files and folders on the HDFS cluster. This parameter is used if the AuthenticationType is defined as KERBEROS.

", + "UpdateLocationHdfsRequest$KerberosPrincipal": "

The Kerberos principal with access to the files and folders on the HDFS cluster.

" + } + }, + "KmsKeyProviderUri": { + "base": null, + "refs": { + "CreateLocationHdfsRequest$KmsKeyProviderUri": "

The URI of the HDFS cluster's Key Management Server (KMS).

", + "DescribeLocationHdfsResponse$KmsKeyProviderUri": "

The URI of the HDFS cluster's Key Management Server (KMS).

", + "UpdateLocationHdfsRequest$KmsKeyProviderUri": "

The URI of the HDFS cluster's Key Management Server (KMS).

" + } + }, "ListAgentsRequest": { "base": "

ListAgentsRequest

", "refs": { @@ -529,6 +663,7 @@ "refs": { "CreateLocationEfsResponse$LocationArn": "

The Amazon Resource Name (ARN) of the Amazon EFS file system location that is created.

", "CreateLocationFsxWindowsResponse$LocationArn": "

The Amazon Resource Name (ARN) of the FSx for Windows File Server file system location that is created.

", + "CreateLocationHdfsResponse$LocationArn": "

The ARN of the source HDFS cluster location that's created.

", "CreateLocationNfsResponse$LocationArn": "

The Amazon Resource Name (ARN) of the source NFS file system location that is created.

", "CreateLocationObjectStorageResponse$LocationArn": "

The Amazon Resource Name (ARN) of the self-managed object storage server location that is created.

", "CreateLocationS3Response$LocationArn": "

The Amazon Resource Name (ARN) of the source Amazon S3 bucket location that is created.

", @@ -540,6 +675,8 @@ "DescribeLocationEfsResponse$LocationArn": "

The Amazon Resource Name (ARN) of the EFS location that was described.

", "DescribeLocationFsxWindowsRequest$LocationArn": "

The Amazon Resource Name (ARN) of the FSx for Windows File Server location to describe.

", "DescribeLocationFsxWindowsResponse$LocationArn": "

The Amazon Resource Name (ARN) of the FSx for Windows File Server location that was described.

", + "DescribeLocationHdfsRequest$LocationArn": "

The Amazon Resource Name (ARN) of the HDFS cluster location to describe.

", + "DescribeLocationHdfsResponse$LocationArn": "

The ARN of the HDFS cluster location.

", "DescribeLocationNfsRequest$LocationArn": "

The Amazon Resource Name (ARN) of the NFS location to describe.

", "DescribeLocationNfsResponse$LocationArn": "

The Amazon Resource Name (ARN) of the NFS location that was described.

", "DescribeLocationObjectStorageRequest$LocationArn": "

The Amazon Resource Name (ARN) of the self-managed object storage server location to describe.

", @@ -551,6 +688,7 @@ "DescribeTaskResponse$SourceLocationArn": "

The Amazon Resource Name (ARN) of the source file system's location.

", "DescribeTaskResponse$DestinationLocationArn": "

The Amazon Resource Name (ARN) of the Amazon Web Services storage resource's location.

", "LocationListEntry$LocationArn": "

The Amazon Resource Name (ARN) of the location. For Network File System (NFS) or Amazon EFS, the location is the export path. For Amazon S3, the location is the prefix path that you want to mount and use as the root of the location.

", + "UpdateLocationHdfsRequest$LocationArn": "

The Amazon Resource Name (ARN) of the source HDFS cluster location.

", "UpdateLocationNfsRequest$LocationArn": "

The Amazon Resource Name (ARN) of the NFS location to update.

", "UpdateLocationObjectStorageRequest$LocationArn": "

The Amazon Resource Name (ARN) of the self-managed object storage server location to be updated.

", "UpdateLocationSmbRequest$LocationArn": "

The Amazon Resource Name (ARN) of the SMB location to update.

" @@ -591,6 +729,7 @@ "refs": { "DescribeLocationEfsResponse$LocationUri": "

The URL of the EFS location that was described.

", "DescribeLocationFsxWindowsResponse$LocationUri": "

The URL of the FSx for Windows File Server location that was described.

", + "DescribeLocationHdfsResponse$LocationUri": "

The URI of the HDFS cluster location.

", "DescribeLocationNfsResponse$LocationUri": "

The URL of the source NFS location that was described.

", "DescribeLocationObjectStorageResponse$LocationUri": "

The URL of the source self-managed object storage server location that was described.

", "DescribeLocationS3Response$LocationUri": "

The URL of the Amazon S3 location that was described.

", @@ -603,7 +742,7 @@ "refs": { "CreateTaskRequest$CloudWatchLogGroupArn": "

The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that is used to monitor and log events in the task.

", "DescribeTaskResponse$CloudWatchLogGroupArn": "

The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that was used to monitor and log events in the task.

For more information on these groups, see Working with Log Groups and Log Streams in the Amazon CloudWatch User Guide.

", - "UpdateTaskRequest$CloudWatchLogGroupArn": "

The Amazon Resource Name (ARN) of the resource name of the CloudWatch LogGroup.

" + "UpdateTaskRequest$CloudWatchLogGroupArn": "

The Amazon Resource Name (ARN) of the Amazon CloudWatch log group.

" } }, "LogLevel": { @@ -668,7 +807,7 @@ "NfsVersion": { "base": null, "refs": { - "NfsMountOptions$Version": "

The specific NFS version that you want DataSync to use to mount your NFS share. If the server refuses to use the version specified, the sync will fail. If you don't specify a version, DataSync defaults to AUTOMATIC. That is, DataSync automatically selects a version based on negotiation with the NFS server.

You can specify the following NFS versions:

" + "NfsMountOptions$Version": "

The specific NFS version that you want DataSync to use to mount your NFS share. If the server refuses to use the version specified, the sync will fail. If you don't specify a version, DataSync defaults to AUTOMATIC. That is, DataSync automatically selects a version based on negotiation with the NFS server.

You can specify the following NFS versions:

" } }, "ObjectStorageAccessKey": { @@ -792,6 +931,14 @@ "DescribeAgentResponse$PrivateLinkConfig": "

The subnet and the security group that DataSync used to access a VPC endpoint.

" } }, + "QopConfiguration": { + "base": "

The Quality of Protection (QOP) configuration specifies the Remote Procedure Call (RPC) and data transfer privacy settings configured on the Hadoop Distributed File System (HDFS) cluster.

", + "refs": { + "CreateLocationHdfsRequest$QopConfiguration": "

The Quality of Protection (QOP) configuration specifies the Remote Procedure Call (RPC) and data transfer protection settings configured on the Hadoop Distributed File System (HDFS) cluster. If QopConfiguration isn't specified, RpcProtection and DataTransferProtection default to PRIVACY. If you set RpcProtection or DataTransferProtection, the other parameter assumes the same value.

", + "DescribeLocationHdfsResponse$QopConfiguration": "

The Quality of Protection (QOP) configuration specifies the Remote Procedure Call (RPC) and data transfer protection settings configured on the Hadoop Distributed File System (HDFS) cluster.

", + "UpdateLocationHdfsRequest$QopConfiguration": "

The Quality of Protection (QOP) configuration specifies the Remote Procedure Call (RPC) and data transfer privacy settings configured on the Hadoop Distributed File System (HDFS) cluster.

" + } + }, "S3BucketArn": { "base": null, "refs": { @@ -892,7 +1039,7 @@ "SourceNetworkInterfaceArns": { "base": null, "refs": { - "DescribeTaskResponse$SourceNetworkInterfaceArns": "

The Amazon Resource Name (ARN) of the source ENIs (Elastic Network Interface) that was created for your subnet.

" + "DescribeTaskResponse$SourceNetworkInterfaceArns": "

The Amazon Resource Names (ARNs) of the source elastic network interfaces (ENIs) that were created for your subnet.

" } }, "StartTaskExecutionRequest": { @@ -919,7 +1066,7 @@ } }, "TagListEntry": { - "base": "

Represents a single entry in a list of AWS resource tags. TagListEntry returns an array that contains a list of tasks when the ListTagsForResource operation is called.

", + "base": "

Represents a single entry in a list of Amazon Web Services resource tags. TagListEntry returns an array that contains a list of tags when the ListTagsForResource operation is called.

", "refs": { "InputTagList$member": null, "OutputTagList$member": null @@ -1065,6 +1212,7 @@ "DescribeAgentResponse$CreationTime": "

The time that the agent was activated (that is, created in your account).

", "DescribeLocationEfsResponse$CreationTime": "

The time that the EFS location was created.

", "DescribeLocationFsxWindowsResponse$CreationTime": "

The time that the FSx for Windows File Server location was created.

", + "DescribeLocationHdfsResponse$CreationTime": "

The time that the HDFS location was created.

", "DescribeLocationNfsResponse$CreationTime": "

The time that the NFS location was created.

", "DescribeLocationObjectStorageResponse$CreationTime": "

The time that the self-managed object storage server agent was created.

", "DescribeLocationS3Response$CreationTime": "

The time that the Amazon S3 bucket location was created.

", @@ -1105,6 +1253,16 @@ "refs": { } }, + "UpdateLocationHdfsRequest": { + "base": null, + "refs": { + } + }, + "UpdateLocationHdfsResponse": { + "base": null, + "refs": { + } + }, "UpdateLocationNfsRequest": { "base": null, "refs": { diff --git a/models/apis/finspace/2021-03-12/api-2.json b/models/apis/finspace/2021-03-12/api-2.json index b1bd6adb643..339b62ea8a7 100644 --- a/models/apis/finspace/2021-03-12/api-2.json +++ b/models/apis/finspace/2021-03-12/api-2.json @@ -155,7 +155,9 @@ "kmsKeyId":{"shape":"KmsKeyId"}, "tags":{"shape":"TagMap"}, "federationMode":{"shape":"FederationMode"}, - "federationParameters":{"shape":"FederationParameters"} + "federationParameters":{"shape":"FederationParameters"}, + "superuserParameters":{"shape":"SuperuserParameters"}, + "dataBundles":{"shape":"DataBundleArns"} } }, "CreateEnvironmentResponse":{ @@ -166,6 +168,16 @@ "environmentUrl":{"shape":"url"} } }, + "DataBundleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:aws:finspace:[A-Za-z0-9_/.-]{0,63}:\\d*:data-bundle/[0-9A-Za-z_-]{1,128}$" + }, + "DataBundleArns":{ + "type":"list", + "member":{"shape":"DataBundleArn"} + }, "DeleteEnvironmentRequest":{ "type":"structure", "required":["environmentId"], @@ -188,6 +200,13 @@ "min":1, "pattern":"^[a-zA-Z0-9. ]{1,1000}$" }, + "EmailId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[A-Z0-9a-z._%+-]+@[A-Za-z0-9.-]+[.]+[A-Za-z]+", + "sensitive":true + }, "Environment":{ "type":"structure", "members":{ @@ -358,6 +377,12 @@ "tags":{"shape":"TagMap"} } }, + "NameString":{ + "type":"string", + "max":50, + "min":1, + "pattern":"^[a-zA-Z0-9]{1,50}$" + }, "PaginationToken":{ "type":"string", "max":1000, @@ -397,6 +422,19 @@ "min":1, "pattern":"^[a-zA-Z-0-9-:\\/.]*$" }, + "SuperuserParameters":{ + "type":"structure", + "required":[ + "emailAddress", + "firstName", + "lastName" + ], + "members":{ + "emailAddress":{"shape":"EmailId"}, + "firstName":{"shape":"NameString"}, + "lastName":{"shape":"NameString"} + } + }, "TagKey":{ "type":"string", "max":128, diff --git a/models/apis/finspace/2021-03-12/docs-2.json b/models/apis/finspace/2021-03-12/docs-2.json index fb0308293a7..fcd97c472fa 100644 --- a/models/apis/finspace/2021-03-12/docs-2.json +++ b/models/apis/finspace/2021-03-12/docs-2.json @@ -1,6 +1,6 @@ { "version": "2.0", - "service": "

The FinSpace management service provides the APIs for managing the FinSpace environments.

", + "service": "

The FinSpace management service provides the APIs for managing FinSpace environments.

", "operations": { "CreateEnvironment": "

Create a new FinSpace environment.

", "DeleteEnvironment": "

Delete a FinSpace environment.

", @@ -33,6 +33,18 @@ "refs": { } }, + "DataBundleArn": { + "base": "

The Amazon Resource Name (ARN) of the data bundle.

", + "refs": { + "DataBundleArns$member": null + } + }, + "DataBundleArns": { + "base": null, + "refs": { + "CreateEnvironmentRequest$dataBundles": "

The list of Amazon Resource Names (ARNs) of the data bundles to install. Currently supported data bundle ARNs:

" + } + }, "DeleteEnvironmentRequest": { "base": null, "refs": { @@ -51,6 +63,12 @@ "UpdateEnvironmentRequest$description": "

The description of the environment.

" } }, + "EmailId": { + "base": null, + "refs": { + "SuperuserParameters$emailAddress": "

The email address of the superuser.

" + } + }, "Environment": { "base": "

Represents a FinSpace environment.

", "refs": { @@ -181,6 +199,13 @@ "refs": { } }, + "NameString": { + "base": null, + "refs": { + "SuperuserParameters$firstName": "

The first name of the superuser.

", + "SuperuserParameters$lastName": "

The last name of the superuser.

" + } + }, "PaginationToken": { "base": null, "refs": { @@ -216,6 +241,12 @@ "Environment$sageMakerStudioDomainUrl": "

The URL of the integrated FinSpace notebook environment in your web application.

" } }, + "SuperuserParameters": { + "base": "

Configuration information for the superuser.

", + "refs": { + "CreateEnvironmentRequest$superuserParameters": "

Configuration information for the superuser.

" + } + }, "TagKey": { "base": null, "refs": { diff --git a/models/apis/macie2/2020-01-01/api-2.json b/models/apis/macie2/2020-01-01/api-2.json index 5401a5bf5f1..fe88dea8781 100644 --- a/models/apis/macie2/2020-01-01/api-2.json +++ b/models/apis/macie2/2020-01-01/api-2.json @@ -2806,6 +2806,10 @@ "shape": "__string", "locationName": "regex" }, + "severityLevels": { + "shape": "SeverityLevelList", + "locationName": "severityLevels" + }, "tags": { "shape": "TagMap", "locationName": "tags" @@ -3079,6 +3083,14 @@ "type": "structure", "members": {} }, + "DataIdentifierSeverity": { + "type": "string", + "enum": [ + "LOW", + "MEDIUM", + "HIGH" + ] + }, "DayOfWeek": { "type": "string", "enum": [ @@ -3872,6 +3884,10 @@ "shape": "__string", "locationName": "regex" }, + "severityLevels": { + "shape": "SeverityLevelList", + "locationName": "severityLevels" + }, "tags": { "shape": "TagMap", "locationName": "tags" @@ -5736,6 +5752,29 @@ "High" ] }, + "SeverityLevel": { + "type": "structure", + "members": { + "occurrencesThreshold": { + "shape": "__long", + "locationName": "occurrencesThreshold" + }, + "severity": { + "shape": "DataIdentifierSeverity", + "locationName": "severity" + } + }, + "required": [ + "occurrencesThreshold", + "severity" + ] + }, + "SeverityLevelList": { + "type": "list", + "member": { + "shape": "SeverityLevel" + } + }, "SharedAccess": { "type": "string", "enum": [ @@ -6577,4 +6616,4 @@ "timestampFormat": "unixTimestamp" } } -} \ No newline at end of file +} diff --git a/models/apis/macie2/2020-01-01/docs-2.json b/models/apis/macie2/2020-01-01/docs-2.json index f9c578d78ec..26fd2fd7559 100644 --- a/models/apis/macie2/2020-01-01/docs-2.json +++ b/models/apis/macie2/2020-01-01/docs-2.json @@ -4,7 +4,7 @@ "operations" : { "AcceptInvitation" : "

Accepts an Amazon Macie membership invitation that was received from a specific account.

", "BatchGetCustomDataIdentifiers" : "

Retrieves information about one or more custom data identifiers.

", - "CreateClassificationJob" : "

Creates and defines the settings for a classification job.

", + "CreateClassificationJob" : "

Creates and defines the settings for a classification job.

", "CreateCustomDataIdentifier" : "

Creates and defines the criteria and other settings for a custom data identifier.

", "CreateFindingsFilter" : "

Creates and defines the criteria and other settings for a findings filter.

", "CreateInvitations" : "

Sends an Amazon Macie membership invitation to one or more accounts.

", @@ -57,7 +57,7 @@ "UpdateClassificationJob" : "

Changes the status of a classification job.

", "UpdateFindingsFilter" : "

Updates the criteria and other settings for a findings filter.

", "UpdateMacieSession" : "

Suspends or re-enables an Amazon Macie account, or updates the configuration settings for a Macie account.

", - "UpdateMemberSession" : "

Enables an Amazon Macie administrator to suspend or re-enable a member account.

", + "UpdateMemberSession" : "

Enables an Amazon Macie administrator to suspend or re-enable Macie for a member account.

", "UpdateOrganizationConfiguration" : "

Updates the Amazon Macie configuration settings for an Amazon Web Services organization.

" }, "shapes" : { @@ -194,7 +194,7 @@ } }, "BucketMetadata" : { - "base" : "

Provides statistical data and other information about an S3 bucket that Amazon Macie monitors and analyzes for your account. If an error occurs when Macie attempts to retrieve and process information about the bucket or the bucket's objects, the value for most of these properties is null. Exceptions are accountId, bucketArn, bucketCreatedAt, bucketName, lastUpdated, and region. To identify the cause of the error, refer to the errorCode and errorMessage values.

", + "base" : "

Provides statistical data and other information about an S3 bucket that Amazon Macie monitors and analyzes for your account. If an error occurs when Macie attempts to retrieve and process information about the bucket or the bucket's objects, the value for the versioning property is false and the value for most other properties is null. Exceptions are accountId, bucketArn, bucketCreatedAt, bucketName, lastUpdated, and region. To identify the cause of the error, refer to the errorCode and errorMessage values.

", "refs" : { "__listOfBucketMetadata$member" : null } @@ -288,7 +288,7 @@ "refs" : { } }, "CreateCustomDataIdentifierRequest" : { - "base" : "

Specifies the criteria and other settings for a custom data identifier. You can't change a custom data identifier after you create it. This helps ensure that you have an immutable history of sensitive data findings and discovery results for data privacy and protection audits or investigations.

", + "base" : "

Specifies the detection criteria and other settings for a custom data identifier. You can't change a custom data identifier after you create it. This helps ensure that you have an immutable history of sensitive data findings and discovery results for data privacy and protection audits or investigations.

", "refs" : { } }, "CreateCustomDataIdentifierResponse" : { @@ -320,7 +320,7 @@ "refs" : { } }, "CreateSampleFindingsRequest" : { - "base" : "

Specifies the types of findings to include in a set of sample findings that Amazon Macie creates.

", + "base" : "

Specifies the types of sample findings to create.

", "refs" : { } }, "CriteriaBlockForJob" : { @@ -385,6 +385,12 @@ "JobScheduleFrequency$DailySchedule" : "

Specifies a daily recurrence pattern for running the job.

" } }, + "DataIdentifierSeverity" : { + "base" : "

The severity of a finding, ranging from LOW, for least severe, to HIGH, for most severe. Valid values are:

", + "refs" : { + "SeverityLevel$Severity" : "

The severity to assign to a finding if the number of occurrences is greater than or equal to the specified threshold (occurrencesThreshold) and, if applicable, is less than the threshold for the next consecutive severity level for the custom data identifier.

" + } + }, "DayOfWeek" : { "base" : null, "refs" : { @@ -576,7 +582,7 @@ "refs" : { } }, "GetCustomDataIdentifierResponse" : { - "base" : "

Provides information about the criteria and other settings for a custom data identifier.

", + "base" : "

Provides information about the detection criteria and other settings for a custom data identifier.

", "refs" : { } }, "GetFindingStatisticsRequest" : { @@ -1182,8 +1188,8 @@ "SensitiveDataItemCategory" : { "base" : "

For a finding, the category of sensitive data that was detected and produced the finding. For a managed data identifier, the category of sensitive data that the managed data identifier detects. Possible values are:

", "refs" : { - "ManagedDataIdentifierSummary$Category" : "

The category of sensitive data that the managed data identifier detects: CREDENTIALS, for credentials data such as private keys or Amazon Web Services secret keys; FINANCIAL_INFORMATION, for financial data such as credit card numbers; or, PERSONAL_INFORMATION, for personal health information, such as health insurance identification numbers, or personally identifiable information, such as passport numbers.

", - "SensitiveDataItem$Category" : "

The category of sensitive data that was detected. For example: CREDENTIALS, for credentials data such as private keys or Amazon Web Services secret keys; FINANCIAL_INFORMATION, for financial data such as credit card numbers; or, PERSONAL_INFORMATION, for personal health information, such as health insurance identification numbers, or personally identifiable information, such as passport numbers.

" + "ManagedDataIdentifierSummary$Category" : "

The category of sensitive data that the managed data identifier detects: CREDENTIALS, for credentials data such as private keys or Amazon Web Services secret access keys; FINANCIAL_INFORMATION, for financial data such as credit card numbers; or, PERSONAL_INFORMATION, for personal health information, such as health insurance identification numbers, or personally identifiable information, such as passport numbers.

", + "SensitiveDataItem$Category" : "

The category of sensitive data that was detected. For example: CREDENTIALS, for credentials data such as private keys or Amazon Web Services secret access keys; FINANCIAL_INFORMATION, for financial data such as credit card numbers; or, PERSONAL_INFORMATION, for personal health information, such as health insurance identification numbers, or personally identifiable information, such as passport numbers.

" } }, "ServerSideEncryption" : { @@ -1234,6 +1240,19 @@ "Severity$Description" : "

The qualitative representation of the finding's severity, ranging from Low (least severe) to High (most severe).

" } }, + "SeverityLevel" : { + "base" : "

Specifies a severity level for findings that a custom data identifier produces. A severity level determines which severity is assigned to the findings, based on the number of occurrences of text that matches the custom data identifier's detection criteria.

", + "refs" : { + "SeverityLevelList$member" : null + } + }, + "SeverityLevelList" : { + "base" : "

The severity to assign to findings that the custom data identifier produces, based on the number of occurrences of text that matches the custom data identifier's detection criteria. You can specify as many as three SeverityLevel objects in this array, one for each severity: LOW, MEDIUM, or HIGH. If you specify more than one, the occurrences thresholds must be in ascending order by severity, moving from LOW to HIGH. For example, 1 for LOW, 50 for MEDIUM, and 100 for HIGH. If an S3 object contains fewer occurrences than the lowest specified threshold, Amazon Macie doesn't create a finding.

If you don't specify any values for this array, Macie creates findings for S3 objects that contain at least one occurrence of text that matches the detection criteria, and Macie automatically assigns the MEDIUM severity to those findings.

", + "refs" : { + "CreateCustomDataIdentifierRequest$SeverityLevels" : "

The severity to assign to findings that the custom data identifier produces, based on the number of occurrences of text that matches the custom data identifier's detection criteria. You can specify as many as three SeverityLevel objects in this array, one for each severity: LOW, MEDIUM, or HIGH. If you specify more than one, the occurrences thresholds must be in ascending order by severity, moving from LOW to HIGH. For example, 1 for LOW, 50 for MEDIUM, and 100 for HIGH. If an S3 object contains fewer occurrences than the lowest specified threshold, Amazon Macie doesn't create a finding.

If you don't specify any values for this array, Macie creates findings for S3 objects that contain at least one occurrence of text that matches the detection criteria, and Macie assigns the MEDIUM severity to those findings.

", + "GetCustomDataIdentifierResponse$SeverityLevels" : "

Specifies the severity that's assigned to findings that the custom data identifier produces, based on the number of occurrences of text that matches the custom data identifier's detection criteria. By default, Amazon Macie creates findings for S3 objects that contain at least one occurrence of text that matches the detection criteria, and Macie assigns the MEDIUM severity to those findings.

" + } + }, "SharedAccess" : { "base" : null, "refs" : { @@ -1391,7 +1410,7 @@ "refs" : { } }, "UpdateMemberSessionRequest" : { - "base" : "

Suspends (pauses) or re-enables an Amazon Macie member account.

", + "base" : "

Suspends (pauses) or re-enables Amazon Macie for a member account.

", "refs" : { } }, "UpdateOrganizationConfigurationRequest" : { @@ -1533,11 +1552,11 @@ "base" : null, "refs" : { "CreateClassificationJobRequest$SamplingPercentage" : "

The sampling depth, as a percentage, for the job to apply when processing objects. This value determines the percentage of eligible objects that the job analyzes. If this value is less than 100, Amazon Macie selects the objects to analyze at random, up to the specified percentage, and analyzes all the data in those objects.

", - "CreateCustomDataIdentifierRequest$MaximumMatchDistance" : "

The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Amazon Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1-300 characters. The default value is 50.

", + "CreateCustomDataIdentifierRequest$MaximumMatchDistance" : "

The maximum number of characters that can exist between text that matches the regular expression and the character sequences specified by the keywords array. Amazon Macie includes or excludes a result based on the proximity of a keyword to text that matches the regular expression. The distance can be 1-300 characters. The default value is 50.

", "CreateFindingsFilterRequest$Position" : "

The position of the filter in the list of saved filters on the Amazon Macie console. This value also determines the order in which the filter is applied to findings, relative to other filters that are also applied to the findings.

", "DescribeBucketsRequest$MaxResults" : "

The maximum number of items to include in each page of the response. The default value is 50.

", "DescribeClassificationJobResponse$SamplingPercentage" : "

The sampling depth, as a percentage, that determines the percentage of eligible objects that the job analyzes.

", - "GetCustomDataIdentifierResponse$MaximumMatchDistance" : "

The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Amazon Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern.

", + "GetCustomDataIdentifierResponse$MaximumMatchDistance" : "

The maximum number of characters that can exist between text that matches the regular expression and the character sequences specified by the keywords array. Amazon Macie includes or excludes a result based on the proximity of a keyword to text that matches the regular expression.

", "GetFindingStatisticsRequest$Size" : "

The maximum number of items to include in each page of the response.

", "GetFindingsFilterResponse$Position" : "

The position of the filter in the list of saved filters on the Amazon Macie console. This value also determines the order in which the filter is applied to findings, relative to other filters that are also applied to the findings.

", "GetUsageStatisticsRequest$MaxResults" : "

The maximum number of items to include in each page of the response.

", @@ -1546,8 +1565,8 @@ "ListFindingsRequest$MaxResults" : "

The maximum number of items to include in each page of the response.

", "MonthlySchedule$DayOfMonth" : "

The numeric day of the month when Amazon Macie runs the job. This value can be an integer from 1 through 31.

If this value exceeds the number of days in a given month, Macie doesn't run the job that month; it runs the job only during months that have the specified day. For example, if this value is 31 and a month has only 30 days, Macie doesn't run the job that month. To run the job every month, specify a value that's less than 29.

", "SearchResourcesRequest$MaxResults" : "

The maximum number of items to include in each page of the response. The default value is 50.

", - "TestCustomDataIdentifierRequest$MaximumMatchDistance" : "

The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Amazon Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1-300 characters. The default value is 50.

", - "TestCustomDataIdentifierResponse$MatchCount" : "

The number of instances of sample text that matched the detection criteria specified in the custom data identifier.

", + "TestCustomDataIdentifierRequest$MaximumMatchDistance" : "

The maximum number of characters that can exist between text that matches the regular expression and the character sequences specified by the keywords array. Amazon Macie includes or excludes a result based on the proximity of a keyword to text that matches the regular expression. The distance can be 1-300 characters. The default value is 50.

", + "TestCustomDataIdentifierResponse$MatchCount" : "

The number of occurrences of sample text that matched the criteria specified by the custom data identifier.

", "UpdateFindingsFilterRequest$Position" : "

The position of the filter in the list of saved filters on the Amazon Macie console. This value also determines the order in which the filter is applied to findings, relative to other filters that are also applied to the findings.

" } }, @@ -1590,7 +1609,7 @@ "__listOfFindingType" : { "base" : null, "refs" : { - "CreateSampleFindingsRequest$FindingTypes" : "

An array that lists one or more types of findings to include in the set of sample findings. Currently, the only supported value is Policy:IAMUser/S3BucketEncryptionDisabled.

" + "CreateSampleFindingsRequest$FindingTypes" : "

An array of finding types, one for each type of sample finding to create. To create a sample of every type of finding that Amazon Macie supports, don't include this array in your request.

" } }, "__listOfFindingsFilterListItem" : { @@ -1726,8 +1745,8 @@ "BucketCriteriaAdditionalProperties$Neq" : "

The value for the property doesn't match (doesn't equal) the specified value. If you specify multiple values, Amazon Macie uses OR logic to join the values.

", "CreateClassificationJobRequest$CustomDataIdentifierIds" : "

An array of unique identifiers, one for each custom data identifier for the job to use when it analyzes data. To use only managed data identifiers, don't specify a value for this property and specify a value other than NONE for the managedDataIdentifierSelector property.

", "CreateClassificationJobRequest$ManagedDataIdentifierIds" : "

An array of unique identifiers, one for each managed data identifier for the job to include (use) or exclude (not use) when it analyzes data. Inclusion or exclusion depends on the managed data identifier selection type that you specify for the job (managedDataIdentifierSelector).

To retrieve a list of valid values for this property, use the ListManagedDataIdentifiers operation.

", - "CreateCustomDataIdentifierRequest$IgnoreWords" : "

An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4-90 UTF-8 characters. Ignore words are case sensitive.

", - "CreateCustomDataIdentifierRequest$Keywords" : "

An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 3-90 UTF-8 characters. Keywords aren't case sensitive.

", + "CreateCustomDataIdentifierRequest$IgnoreWords" : "

An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression contains any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4-90 UTF-8 characters. Ignore words are case sensitive.

", + "CreateCustomDataIdentifierRequest$Keywords" : "

An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 3-90 UTF-8 characters. Keywords aren't case sensitive.

", "CreateInvitationsRequest$AccountIds" : "

An array that lists Amazon Web Services account IDs, one for each account to send the invitation to.

", "CriterionAdditionalProperties$Eq" : "

The value for the property matches (equals) the specified value. If you specify multiple values, Macie uses OR logic to join the values.

", "CriterionAdditionalProperties$EqExactMatch" : "

The value for the property exclusively matches (equals an exact match for) all the specified values. If you specify multiple values, Amazon Macie uses AND logic to join the values.

You can use this operator with the following properties: customDataIdentifiers.detections.arn, customDataIdentifiers.detections.name, resourcesAffected.s3Bucket.tags.key, resourcesAffected.s3Bucket.tags.value, resourcesAffected.s3Object.tags.key, resourcesAffected.s3Object.tags.value, sensitiveData.category, and sensitiveData.detections.type.

", @@ -1736,8 +1755,8 @@ "DeleteInvitationsRequest$AccountIds" : "

An array that lists Amazon Web Services account IDs, one for each account that sent an invitation to delete.

", "DescribeClassificationJobResponse$CustomDataIdentifierIds" : "

An array of unique identifiers, one for each custom data identifier that the job uses to analyze data. This value is null if the job uses only managed data identifiers to analyze data.

", "DescribeClassificationJobResponse$ManagedDataIdentifierIds" : "

An array of unique identifiers, one for each managed data identifier that the job is explicitly configured to include (use) or exclude (not use) when it analyzes data. Inclusion or exclusion depends on the managed data identifier selection type specified for the job (managedDataIdentifierSelector). This value is null if the job's managed data identifier selection type is ALL or the job uses only custom data identifiers (customDataIdentifierIds) to analyze data.

", - "GetCustomDataIdentifierResponse$IgnoreWords" : "

An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. Ignore words are case sensitive.

", - "GetCustomDataIdentifierResponse$Keywords" : "

An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. Keywords aren't case sensitive.

", + "GetCustomDataIdentifierResponse$IgnoreWords" : "

An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression contains any string in this array, Amazon Macie ignores it. Ignore words are case sensitive.

", + "GetCustomDataIdentifierResponse$Keywords" : "

An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. Keywords aren't case sensitive.

", "GetFindingsRequest$FindingIds" : "

An array of strings that lists the unique identifiers for the findings to retrieve.

", "ListFindingsResponse$FindingIds" : "

An array of strings, where each string is the unique identifier for a finding that meets the filter criteria specified in the request.

", "ListJobsFilterTerm$Values" : "

An array that lists one or more values to use to filter the results.

", @@ -1746,8 +1765,8 @@ "SearchResourcesSimpleCriterion$Values" : "

An array that lists one or more values to use in the condition. If you specify multiple values, Amazon Macie uses OR logic to join the values. Valid values for each supported property (key) are:

Values are case sensitive. Also, Macie doesn't support use of partial values or wildcard characters in values.

", "SimpleCriterionForJob$Values" : "

An array that lists one or more values to use in the condition. If you specify multiple values, Amazon Macie uses OR logic to join the values. Valid values for each supported property (key) are:

Values are case sensitive. Also, Macie doesn't support use of partial values or wildcard characters in these values.

", "SimpleScopeTerm$Values" : "

An array that lists the values to use in the condition. If the value for the key property is OBJECT_EXTENSION or OBJECT_KEY, this array can specify multiple values and Amazon Macie uses OR logic to join the values. Otherwise, this array can specify only one value.

Valid values for each supported property (key) are:

Macie doesn't support use of wildcard characters in these values. Also, string values are case sensitive.

", - "TestCustomDataIdentifierRequest$IgnoreWords" : "

An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4-90 UTF-8 characters. Ignore words are case sensitive.

", - "TestCustomDataIdentifierRequest$Keywords" : "

An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 3-90 UTF-8 characters. Keywords aren't case sensitive.

", + "TestCustomDataIdentifierRequest$IgnoreWords" : "

An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression contains any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4-90 UTF-8 characters. Ignore words are case sensitive.

", + "TestCustomDataIdentifierRequest$Keywords" : "

An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 3-90 UTF-8 characters. Keywords aren't case sensitive.

", "UsageStatisticsFilter$Values" : "

An array that lists values to use in the condition, based on the value for the field specified by the key property. If the value for the key property is accountId, this array can specify multiple values. Otherwise, this array can specify only one value.

Valid values for each supported field are:

" } }, @@ -1818,7 +1837,8 @@ "S3Object$Size" : "

The total storage size, in bytes, of the object.

", "SensitiveDataItem$TotalCount" : "

The total number of occurrences of the sensitive data that was detected.

", "ServiceLimit$Value" : "

The value for the metric specified by the UsageByAccount.type field in the response.

", - "Severity$Score" : "

The numerical representation of the finding's severity, ranging from 1 (least severe) to 3 (most severe).

" + "Severity$Score" : "

The numerical representation of the finding's severity, ranging from 1 (least severe) to 3 (most severe).

", + "SeverityLevel$OccurrencesThreshold" : "

The minimum number of occurrences of text that must match the custom data identifier's detection criteria in order to produce a finding with the specified severity (severity).

" } }, "__string" : { @@ -1858,8 +1878,8 @@ "ClassificationDetails$JobArn" : "

The Amazon Resource Name (ARN) of the classification job that produced the finding.

", "ClassificationDetails$JobId" : "

The unique identifier for the classification job that produced the finding.

", "ClassificationResult$MimeType" : "

The type of content, as a MIME type, that the finding applies to. For example, application/gzip, for a GNU Gzip compressed archive file, or application/pdf, for an Adobe Portable Document Format file.

", - "ClassificationResultStatus$Code" : "

The status of the finding. Possible values are:

", - "ClassificationResultStatus$Reason" : "

A brief description of the status of the finding. Amazon Macie uses this value to notify you of any errors, warnings, or considerations that might impact your analysis of the finding.

", + "ClassificationResultStatus$Code" : "

The status of the finding. Possible values are:

", + "ClassificationResultStatus$Reason" : "

A brief description of the status of the finding. This value is null if the status (code) of the finding is COMPLETE.

Amazon Macie uses this value to notify you of any errors, warnings, or considerations that might impact your analysis of the finding and the affected S3 object. Possible values are:

For information about sensitive data discovery quotas for files, see Amazon Macie quotas in the Amazon Macie User Guide.

", "ConflictException$Message" : "

The explanation of the error that occurred.

", "CreateClassificationJobRequest$ClientToken" : "

A unique, case-sensitive token that you provide to ensure the idempotency of the request.

", "CreateClassificationJobRequest$Description" : "

A custom description of the job. The description can contain as many as 200 characters.

", diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index bf5693d66b4..5160602f593 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -16362,7 +16362,12 @@ "defaults" : { "hostname" : "{service}.{region}.{dnsSuffix}", "protocols" : [ "https" ], - "signatureVersions" : [ "v4" ] + "signatureVersions" : [ "v4" ], + "variants" : [ { + "dnsSuffix" : "c2s.ic.gov", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] }, "dnsSuffix" : "c2s.ic.gov", "partition" : "aws-iso", @@ -16849,7 +16854,12 @@ "defaults" : { "hostname" : "{service}.{region}.{dnsSuffix}", "protocols" : [ "https" ], - "signatureVersions" : [ "v4" ] + "signatureVersions" : [ "v4" ], + "variants" : [ { + "dnsSuffix" : "sc2s.sgov.gov", + "hostname" : "{service}-fips.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] }, "dnsSuffix" : "sc2s.sgov.gov", "partition" : "aws-iso-b", diff --git a/service/connectparticipant/api.go b/service/connectparticipant/api.go index 4a20f3c11c5..d7816d92455 100644 --- a/service/connectparticipant/api.go +++ b/service/connectparticipant/api.go @@ -2669,18 +2669,6 @@ const ( // ChatItemTypeConnectionAck is a ChatItemType enum value ChatItemTypeConnectionAck = "CONNECTION_ACK" - - // ChatItemTypeParticipantActive is a ChatItemType enum value - ChatItemTypeParticipantActive = "PARTICIPANT_ACTIVE" - - // ChatItemTypeParticipantInactive is a ChatItemType enum value - ChatItemTypeParticipantInactive = "PARTICIPANT_INACTIVE" - - // ChatItemTypeParticipantEngaged is a ChatItemType enum value - ChatItemTypeParticipantEngaged = "PARTICIPANT_ENGAGED" - - // ChatItemTypeParticipantDisengaged is a ChatItemType enum value - ChatItemTypeParticipantDisengaged = "PARTICIPANT_DISENGAGED" ) // ChatItemType_Values returns all elements of the ChatItemType enum @@ -2696,10 +2684,6 @@ func ChatItemType_Values() []string { ChatItemTypeEvent, ChatItemTypeAttachment, ChatItemTypeConnectionAck, - ChatItemTypeParticipantActive, - ChatItemTypeParticipantInactive, - ChatItemTypeParticipantEngaged, - ChatItemTypeParticipantDisengaged, } } diff --git a/service/datasync/api.go b/service/datasync/api.go index 1d4e8c19c83..4758828d554 100644 --- a/service/datasync/api.go +++ b/service/datasync/api.go @@ -368,6 +368,88 @@ func (c *DataSync) CreateLocationFsxWindowsWithContext(ctx aws.Context, input *C return out, req.Send() } +const opCreateLocationHdfs = "CreateLocationHdfs" + +// CreateLocationHdfsRequest generates a "aws/request.Request" representing the +// client's request for the CreateLocationHdfs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateLocationHdfs for more information on using the CreateLocationHdfs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateLocationHdfsRequest method. 
+// req, resp := client.CreateLocationHdfsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationHdfs +func (c *DataSync) CreateLocationHdfsRequest(input *CreateLocationHdfsInput) (req *request.Request, output *CreateLocationHdfsOutput) { + op := &request.Operation{ + Name: opCreateLocationHdfs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLocationHdfsInput{} + } + + output = &CreateLocationHdfsOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateLocationHdfs API operation for AWS DataSync. +// +// Creates an endpoint for a Hadoop Distributed File System (HDFS). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS DataSync's +// API operation CreateLocationHdfs for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// This exception is thrown when the client submits a malformed request. +// +// * InternalException +// This exception is thrown when an error occurs in the DataSync service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationHdfs +func (c *DataSync) CreateLocationHdfs(input *CreateLocationHdfsInput) (*CreateLocationHdfsOutput, error) { + req, out := c.CreateLocationHdfsRequest(input) + return out, req.Send() +} + +// CreateLocationHdfsWithContext is the same as CreateLocationHdfs with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLocationHdfs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DataSync) CreateLocationHdfsWithContext(ctx aws.Context, input *CreateLocationHdfsInput, opts ...request.Option) (*CreateLocationHdfsOutput, error) { + req, out := c.CreateLocationHdfsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateLocationNfs = "CreateLocationNfs" // CreateLocationNfsRequest generates a "aws/request.Request" representing the @@ -1306,6 +1388,89 @@ func (c *DataSync) DescribeLocationFsxWindowsWithContext(ctx aws.Context, input return out, req.Send() } +const opDescribeLocationHdfs = "DescribeLocationHdfs" + +// DescribeLocationHdfsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLocationHdfs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLocationHdfs for more information on using the DescribeLocationHdfs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLocationHdfsRequest method. 
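// For orientation, an illustrative sketch using the one-shot DescribeLocationHdfs
// wrapper to read back a location's settings (the location ARN is a placeholder):
//
//    svc := datasync.New(session.Must(session.NewSession()))
//
//    loc, err := svc.DescribeLocationHdfs(&datasync.DescribeLocationHdfsInput{
//        LocationArn: aws.String("arn:aws:datasync:us-east-1:111122223333:location/loc-0123456789abcdef0"),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    // The authentication type, NameNodes, QOP settings, and so on come back as
//    // optional (pointer) fields on the output struct.
//    fmt.Println(aws.StringValue(loc.AuthenticationType), aws.StringValue(loc.LocationUri))
//
// The generated Request/Send skeleton follows.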
+// req, resp := client.DescribeLocationHdfsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationHdfs +func (c *DataSync) DescribeLocationHdfsRequest(input *DescribeLocationHdfsInput) (req *request.Request, output *DescribeLocationHdfsOutput) { + op := &request.Operation{ + Name: opDescribeLocationHdfs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLocationHdfsInput{} + } + + output = &DescribeLocationHdfsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLocationHdfs API operation for AWS DataSync. +// +// Returns metadata, such as the authentication information about the Hadoop +// Distributed File System (HDFS) location. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS DataSync's +// API operation DescribeLocationHdfs for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// This exception is thrown when the client submits a malformed request. +// +// * InternalException +// This exception is thrown when an error occurs in the DataSync service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationHdfs +func (c *DataSync) DescribeLocationHdfs(input *DescribeLocationHdfsInput) (*DescribeLocationHdfsOutput, error) { + req, out := c.DescribeLocationHdfsRequest(input) + return out, req.Send() +} + +// DescribeLocationHdfsWithContext is the same as DescribeLocationHdfs with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLocationHdfs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DataSync) DescribeLocationHdfsWithContext(ctx aws.Context, input *DescribeLocationHdfsInput, opts ...request.Option) (*DescribeLocationHdfsOutput, error) { + req, out := c.DescribeLocationHdfsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeLocationNfs = "DescribeLocationNfs" // DescribeLocationNfsRequest generates a "aws/request.Request" representing the @@ -2854,6 +3019,90 @@ func (c *DataSync) UpdateAgentWithContext(ctx aws.Context, input *UpdateAgentInp return out, req.Send() } +const opUpdateLocationHdfs = "UpdateLocationHdfs" + +// UpdateLocationHdfsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateLocationHdfs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateLocationHdfs for more information on using the UpdateLocationHdfs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
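// For orientation, an illustrative sketch using the one-shot UpdateLocationHdfs
// wrapper; only LocationArn is required, and the other fields shown here (a new
// subdirectory and QOP settings) are placeholder values:
//
//    svc := datasync.New(session.Must(session.NewSession()))
//
//    _, err := svc.UpdateLocationHdfs(&datasync.UpdateLocationHdfsInput{
//        LocationArn:  aws.String("arn:aws:datasync:us-east-1:111122223333:location/loc-0123456789abcdef0"),
//        Subdirectory: aws.String("/data/archive"),
//        QopConfiguration: &datasync.QopConfiguration{
//            RpcProtection:          aws.String("PRIVACY"),
//            DataTransferProtection: aws.String("PRIVACY"),
//        },
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//
// The generated Request/Send skeleton follows.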
+// +// +// // Example sending a request using the UpdateLocationHdfsRequest method. +// req, resp := client.UpdateLocationHdfsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/UpdateLocationHdfs +func (c *DataSync) UpdateLocationHdfsRequest(input *UpdateLocationHdfsInput) (req *request.Request, output *UpdateLocationHdfsOutput) { + op := &request.Operation{ + Name: opUpdateLocationHdfs, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateLocationHdfsInput{} + } + + output = &UpdateLocationHdfsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateLocationHdfs API operation for AWS DataSync. +// +// Updates some parameters of a previously created location for a Hadoop Distributed +// File System cluster. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS DataSync's +// API operation UpdateLocationHdfs for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// This exception is thrown when the client submits a malformed request. +// +// * InternalException +// This exception is thrown when an error occurs in the DataSync service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/UpdateLocationHdfs +func (c *DataSync) UpdateLocationHdfs(input *UpdateLocationHdfsInput) (*UpdateLocationHdfsOutput, error) { + req, out := c.UpdateLocationHdfsRequest(input) + return out, req.Send() +} + +// UpdateLocationHdfsWithContext is the same as UpdateLocationHdfs with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateLocationHdfs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DataSync) UpdateLocationHdfsWithContext(ctx aws.Context, input *UpdateLocationHdfsInput, opts ...request.Option) (*UpdateLocationHdfsOutput, error) { + req, out := c.UpdateLocationHdfsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateLocationNfs = "UpdateLocationNfs" // UpdateLocationNfsRequest generates a "aws/request.Request" representing the @@ -3905,6 +4154,271 @@ func (s *CreateLocationFsxWindowsOutput) SetLocationArn(v string) *CreateLocatio return s } +type CreateLocationHdfsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Names (ARNs) of the agents that are used to connect to + // the HDFS cluster. + // + // AgentArns is a required field + AgentArns []*string `min:"1" type:"list" required:"true"` + + // The type of authentication used to determine the identity of the user. + // + // AuthenticationType is a required field + AuthenticationType *string `type:"string" required:"true" enum:"HdfsAuthenticationType"` + + // The size of data blocks to write into the HDFS cluster. The block size must + // be a multiple of 512 bytes. The default block size is 128 mebibytes (MiB). 
+ BlockSize *int64 `min:"1.048576e+06" type:"integer"` + + // The Kerberos key table (keytab) that contains mappings between the defined + // Kerberos principal and the encrypted keys. You can load the keytab from a + // file by providing the file's address. If you're using the CLI, it performs + // base64 encoding for you. Otherwise, provide the base64-encoded text. + // + // If KERBEROS is specified for AuthenticationType, this parameter is required. + // KerberosKeytab is automatically base64 encoded/decoded by the SDK. + KerberosKeytab []byte `type:"blob"` + + // The krb5.conf file that contains the Kerberos configuration information. + // You can load the krb5.conf file by providing the file's address. If you're + // using the CLI, it performs the base64 encoding for you. Otherwise, provide + // the base64-encoded text. + // + // If KERBEROS is specified for AuthenticationType, this parameter is required. + // KerberosKrb5Conf is automatically base64 encoded/decoded by the SDK. + KerberosKrb5Conf []byte `type:"blob"` + + // The Kerberos principal with access to the files and folders on the HDFS cluster. + // + // If KERBEROS is specified for AuthenticationType, this parameter is required. + KerberosPrincipal *string `min:"1" type:"string"` + + // The URI of the HDFS cluster's Key Management Server (KMS). + KmsKeyProviderUri *string `min:"1" type:"string"` + + // The NameNode that manages the HDFS namespace. The NameNode performs operations + // such as opening, closing, and renaming files and directories. The NameNode + // contains the information to map blocks of data to the DataNodes. You can + // use only one NameNode. + // + // NameNodes is a required field + NameNodes []*HdfsNameNode `min:"1" type:"list" required:"true"` + + // The Quality of Protection (QOP) configuration specifies the Remote Procedure + // Call (RPC) and data transfer protection settings configured on the Hadoop + // Distributed File System (HDFS) cluster. If QopConfiguration isn't specified, + // RpcProtection and DataTransferProtection default to PRIVACY. If you set RpcProtection + // or DataTransferProtection, the other parameter assumes the same value. + QopConfiguration *QopConfiguration `type:"structure"` + + // The number of DataNodes to replicate the data to when writing to the HDFS + // cluster. By default, data is replicated to three DataNodes. + ReplicationFactor *int64 `min:"1" type:"integer"` + + // The user name used to identify the client on the host operating system. + // + // If SIMPLE is specified for AuthenticationType, this parameter is required. + SimpleUser *string `min:"1" type:"string"` + + // A subdirectory in the HDFS cluster. This subdirectory is used to read data + // from or write data to the HDFS cluster. If the subdirectory isn't specified, + // it will default to /. + Subdirectory *string `type:"string"` + + // The key-value pair that represents the tag that you want to add to the location. + // The value can be an empty string. We recommend using tags to name your resources. + Tags []*TagListEntry `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLocationHdfsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLocationHdfsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLocationHdfsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLocationHdfsInput"} + if s.AgentArns == nil { + invalidParams.Add(request.NewErrParamRequired("AgentArns")) + } + if s.AgentArns != nil && len(s.AgentArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AgentArns", 1)) + } + if s.AuthenticationType == nil { + invalidParams.Add(request.NewErrParamRequired("AuthenticationType")) + } + if s.BlockSize != nil && *s.BlockSize < 1.048576e+06 { + invalidParams.Add(request.NewErrParamMinValue("BlockSize", 1.048576e+06)) + } + if s.KerberosPrincipal != nil && len(*s.KerberosPrincipal) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KerberosPrincipal", 1)) + } + if s.KmsKeyProviderUri != nil && len(*s.KmsKeyProviderUri) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KmsKeyProviderUri", 1)) + } + if s.NameNodes == nil { + invalidParams.Add(request.NewErrParamRequired("NameNodes")) + } + if s.NameNodes != nil && len(s.NameNodes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NameNodes", 1)) + } + if s.ReplicationFactor != nil && *s.ReplicationFactor < 1 { + invalidParams.Add(request.NewErrParamMinValue("ReplicationFactor", 1)) + } + if s.SimpleUser != nil && len(*s.SimpleUser) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SimpleUser", 1)) + } + if s.NameNodes != nil { + for i, v := range s.NameNodes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NameNodes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAgentArns sets the AgentArns field's value. +func (s *CreateLocationHdfsInput) SetAgentArns(v []*string) *CreateLocationHdfsInput { + s.AgentArns = v + return s +} + +// SetAuthenticationType sets the AuthenticationType field's value. +func (s *CreateLocationHdfsInput) SetAuthenticationType(v string) *CreateLocationHdfsInput { + s.AuthenticationType = &v + return s +} + +// SetBlockSize sets the BlockSize field's value. +func (s *CreateLocationHdfsInput) SetBlockSize(v int64) *CreateLocationHdfsInput { + s.BlockSize = &v + return s +} + +// SetKerberosKeytab sets the KerberosKeytab field's value. +func (s *CreateLocationHdfsInput) SetKerberosKeytab(v []byte) *CreateLocationHdfsInput { + s.KerberosKeytab = v + return s +} + +// SetKerberosKrb5Conf sets the KerberosKrb5Conf field's value. +func (s *CreateLocationHdfsInput) SetKerberosKrb5Conf(v []byte) *CreateLocationHdfsInput { + s.KerberosKrb5Conf = v + return s +} + +// SetKerberosPrincipal sets the KerberosPrincipal field's value. +func (s *CreateLocationHdfsInput) SetKerberosPrincipal(v string) *CreateLocationHdfsInput { + s.KerberosPrincipal = &v + return s +} + +// SetKmsKeyProviderUri sets the KmsKeyProviderUri field's value. 
+func (s *CreateLocationHdfsInput) SetKmsKeyProviderUri(v string) *CreateLocationHdfsInput { + s.KmsKeyProviderUri = &v + return s +} + +// SetNameNodes sets the NameNodes field's value. +func (s *CreateLocationHdfsInput) SetNameNodes(v []*HdfsNameNode) *CreateLocationHdfsInput { + s.NameNodes = v + return s +} + +// SetQopConfiguration sets the QopConfiguration field's value. +func (s *CreateLocationHdfsInput) SetQopConfiguration(v *QopConfiguration) *CreateLocationHdfsInput { + s.QopConfiguration = v + return s +} + +// SetReplicationFactor sets the ReplicationFactor field's value. +func (s *CreateLocationHdfsInput) SetReplicationFactor(v int64) *CreateLocationHdfsInput { + s.ReplicationFactor = &v + return s +} + +// SetSimpleUser sets the SimpleUser field's value. +func (s *CreateLocationHdfsInput) SetSimpleUser(v string) *CreateLocationHdfsInput { + s.SimpleUser = &v + return s +} + +// SetSubdirectory sets the Subdirectory field's value. +func (s *CreateLocationHdfsInput) SetSubdirectory(v string) *CreateLocationHdfsInput { + s.Subdirectory = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateLocationHdfsInput) SetTags(v []*TagListEntry) *CreateLocationHdfsInput { + s.Tags = v + return s +} + +type CreateLocationHdfsOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the source HDFS cluster location that's created. + LocationArn *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLocationHdfsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateLocationHdfsOutput) GoString() string { + return s.String() +} + +// SetLocationArn sets the LocationArn field's value. +func (s *CreateLocationHdfsOutput) SetLocationArn(v string) *CreateLocationHdfsOutput { + s.LocationArn = &v + return s +} + // CreateLocationNfsRequest type CreateLocationNfsInput struct { _ struct{} `type:"structure"` @@ -4685,9 +5199,9 @@ type CreateTaskInput struct { Excludes []*FilterRule `type:"list"` // A list of filter rules that determines which files to include when running - // a task. The pattern should contain a single filter string that consists of - // the patterns to include. The patterns are delimited by "|" (that is, a pipe). - // For example: "/folder1|/folder2" + // a task. The pattern contains a single filter string that consists of the + // patterns to include. The patterns are delimited by "|" (that is, a pipe), + // for example, "/folder1|/folder2". Includes []*FilterRule `type:"list"` // The name of a task. This value is a text reference that is used to identify @@ -5416,33 +5930,214 @@ func (s *DescribeLocationFsxWindowsOutput) SetCreationTime(v time.Time) *Describ return s } -// SetDomain sets the Domain field's value. -func (s *DescribeLocationFsxWindowsOutput) SetDomain(v string) *DescribeLocationFsxWindowsOutput { - s.Domain = &v +// SetDomain sets the Domain field's value. 
+func (s *DescribeLocationFsxWindowsOutput) SetDomain(v string) *DescribeLocationFsxWindowsOutput { + s.Domain = &v + return s +} + +// SetLocationArn sets the LocationArn field's value. +func (s *DescribeLocationFsxWindowsOutput) SetLocationArn(v string) *DescribeLocationFsxWindowsOutput { + s.LocationArn = &v + return s +} + +// SetLocationUri sets the LocationUri field's value. +func (s *DescribeLocationFsxWindowsOutput) SetLocationUri(v string) *DescribeLocationFsxWindowsOutput { + s.LocationUri = &v + return s +} + +// SetSecurityGroupArns sets the SecurityGroupArns field's value. +func (s *DescribeLocationFsxWindowsOutput) SetSecurityGroupArns(v []*string) *DescribeLocationFsxWindowsOutput { + s.SecurityGroupArns = v + return s +} + +// SetUser sets the User field's value. +func (s *DescribeLocationFsxWindowsOutput) SetUser(v string) *DescribeLocationFsxWindowsOutput { + s.User = &v + return s +} + +type DescribeLocationHdfsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the HDFS cluster location to describe. + // + // LocationArn is a required field + LocationArn *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLocationHdfsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLocationHdfsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLocationHdfsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLocationHdfsInput"} + if s.LocationArn == nil { + invalidParams.Add(request.NewErrParamRequired("LocationArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLocationArn sets the LocationArn field's value. +func (s *DescribeLocationHdfsInput) SetLocationArn(v string) *DescribeLocationHdfsInput { + s.LocationArn = &v + return s +} + +type DescribeLocationHdfsOutput struct { + _ struct{} `type:"structure"` + + // The ARNs of the agents that are used to connect to the HDFS cluster. + AgentArns []*string `min:"1" type:"list"` + + // The type of authentication used to determine the identity of the user. + AuthenticationType *string `type:"string" enum:"HdfsAuthenticationType"` + + // The size of the data blocks to write into the HDFS cluster. + BlockSize *int64 `min:"1.048576e+06" type:"integer"` + + // The time that the HDFS location was created. + CreationTime *time.Time `type:"timestamp"` + + // The Kerberos principal with access to the files and folders on the HDFS cluster. + // This parameter is used if the AuthenticationType is defined as KERBEROS. + KerberosPrincipal *string `min:"1" type:"string"` + + // The URI of the HDFS cluster's Key Management Server (KMS). + KmsKeyProviderUri *string `min:"1" type:"string"` + + // The ARN of the HDFS cluster location. + LocationArn *string `type:"string"` + + // The URI of the HDFS cluster location. + LocationUri *string `type:"string"` + + // The NameNode that manage the HDFS namespace. 
+ NameNodes []*HdfsNameNode `min:"1" type:"list"` + + // The Quality of Protection (QOP) configuration specifies the Remote Procedure + // Call (RPC) and data transfer protection settings configured on the Hadoop + // Distributed File System (HDFS) cluster. + QopConfiguration *QopConfiguration `type:"structure"` + + // The number of DataNodes to replicate the data to when writing to the HDFS + // cluster. + ReplicationFactor *int64 `min:"1" type:"integer"` + + // The user name used to identify the client on the host operating system. This + // parameter is used if the AuthenticationType is defined as SIMPLE. + SimpleUser *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLocationHdfsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeLocationHdfsOutput) GoString() string { + return s.String() +} + +// SetAgentArns sets the AgentArns field's value. +func (s *DescribeLocationHdfsOutput) SetAgentArns(v []*string) *DescribeLocationHdfsOutput { + s.AgentArns = v + return s +} + +// SetAuthenticationType sets the AuthenticationType field's value. +func (s *DescribeLocationHdfsOutput) SetAuthenticationType(v string) *DescribeLocationHdfsOutput { + s.AuthenticationType = &v + return s +} + +// SetBlockSize sets the BlockSize field's value. +func (s *DescribeLocationHdfsOutput) SetBlockSize(v int64) *DescribeLocationHdfsOutput { + s.BlockSize = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeLocationHdfsOutput) SetCreationTime(v time.Time) *DescribeLocationHdfsOutput { + s.CreationTime = &v + return s +} + +// SetKerberosPrincipal sets the KerberosPrincipal field's value. +func (s *DescribeLocationHdfsOutput) SetKerberosPrincipal(v string) *DescribeLocationHdfsOutput { + s.KerberosPrincipal = &v + return s +} + +// SetKmsKeyProviderUri sets the KmsKeyProviderUri field's value. +func (s *DescribeLocationHdfsOutput) SetKmsKeyProviderUri(v string) *DescribeLocationHdfsOutput { + s.KmsKeyProviderUri = &v return s } // SetLocationArn sets the LocationArn field's value. -func (s *DescribeLocationFsxWindowsOutput) SetLocationArn(v string) *DescribeLocationFsxWindowsOutput { +func (s *DescribeLocationHdfsOutput) SetLocationArn(v string) *DescribeLocationHdfsOutput { s.LocationArn = &v return s } // SetLocationUri sets the LocationUri field's value. -func (s *DescribeLocationFsxWindowsOutput) SetLocationUri(v string) *DescribeLocationFsxWindowsOutput { +func (s *DescribeLocationHdfsOutput) SetLocationUri(v string) *DescribeLocationHdfsOutput { s.LocationUri = &v return s } -// SetSecurityGroupArns sets the SecurityGroupArns field's value. -func (s *DescribeLocationFsxWindowsOutput) SetSecurityGroupArns(v []*string) *DescribeLocationFsxWindowsOutput { - s.SecurityGroupArns = v +// SetNameNodes sets the NameNodes field's value. +func (s *DescribeLocationHdfsOutput) SetNameNodes(v []*HdfsNameNode) *DescribeLocationHdfsOutput { + s.NameNodes = v return s } -// SetUser sets the User field's value. 
-func (s *DescribeLocationFsxWindowsOutput) SetUser(v string) *DescribeLocationFsxWindowsOutput { - s.User = &v +// SetQopConfiguration sets the QopConfiguration field's value. +func (s *DescribeLocationHdfsOutput) SetQopConfiguration(v *QopConfiguration) *DescribeLocationHdfsOutput { + s.QopConfiguration = v + return s +} + +// SetReplicationFactor sets the ReplicationFactor field's value. +func (s *DescribeLocationHdfsOutput) SetReplicationFactor(v int64) *DescribeLocationHdfsOutput { + s.ReplicationFactor = &v + return s +} + +// SetSimpleUser sets the SimpleUser field's value. +func (s *DescribeLocationHdfsOutput) SetSimpleUser(v string) *DescribeLocationHdfsOutput { + s.SimpleUser = &v return s } @@ -6262,8 +6957,8 @@ type DescribeTaskOutput struct { // location. DestinationLocationArn *string `type:"string"` - // The Amazon Resource Name (ARN) of the destination ENIs (Elastic Network Interface) - // that was created for your subnet. + // The Amazon Resource Names (ARNs) of the destination elastic network interfaces + // (ENIs) that were created for your subnet. DestinationNetworkInterfaceArns []*string `type:"list"` // Errors that DataSync encountered during execution of the task. You can use @@ -6276,14 +6971,14 @@ type DescribeTaskOutput struct { // A list of filter rules that determines which files to exclude from a task. // The list should contain a single filter string that consists of the patterns - // to exclude. The patterns are delimited by "|" (that is, a pipe), for example: - // "/folder1|/folder2" + // to exclude. The patterns are delimited by "|" (that is, a pipe), for example, + // "/folder1|/folder2". Excludes []*FilterRule `type:"list"` // A list of filter rules that determines which files to include when running - // a task. The pattern should contain a single filter string that consists of - // the patterns to include. The patterns are delimited by "|" (that is, a pipe). - // For example: "/folder1|/folder2" + // a task. The pattern contains a single filter string that consists of the + // patterns to include. The patterns are delimited by "|" (that is, a pipe), + // for example, "/folder1|/folder2". Includes []*FilterRule `type:"list"` // The name of the task that was described. @@ -6306,8 +7001,8 @@ type DescribeTaskOutput struct { // The Amazon Resource Name (ARN) of the source file system's location. SourceLocationArn *string `type:"string"` - // The Amazon Resource Name (ARN) of the source ENIs (Elastic Network Interface) - // that was created for your subnet. + // The Amazon Resource Names (ARNs) of the source elastic network interfaces + // (ENIs) that were created for your subnet. SourceNetworkInterfaceArns []*string `type:"list"` // The status of the task that was described. @@ -6547,6 +7242,78 @@ func (s *FilterRule) SetValue(v string) *FilterRule { return s } +// The NameNode of the Hadoop Distributed File System (HDFS). The NameNode manages +// the file system's namespace. The NameNode performs operations such as opening, +// closing, and renaming files and directories. The NameNode contains the information +// to map blocks of data to the DataNodes. +type HdfsNameNode struct { + _ struct{} `type:"structure"` + + // The hostname of the NameNode in the HDFS cluster. This value is the IP address + // or Domain Name Service (DNS) name of the NameNode. An agent that's installed + // on-premises uses this hostname to communicate with the NameNode in the network. 
+ // + // Hostname is a required field + Hostname *string `min:"1" type:"string" required:"true"` + + // The port that the NameNode uses to listen to client requests. + // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HdfsNameNode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HdfsNameNode) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HdfsNameNode) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HdfsNameNode"} + if s.Hostname == nil { + invalidParams.Add(request.NewErrParamRequired("Hostname")) + } + if s.Hostname != nil && len(*s.Hostname) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Hostname", 1)) + } + if s.Port == nil { + invalidParams.Add(request.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHostname sets the Hostname field's value. +func (s *HdfsNameNode) SetHostname(v string) *HdfsNameNode { + s.Hostname = &v + return s +} + +// SetPort sets the Port field's value. +func (s *HdfsNameNode) SetPort(v int64) *HdfsNameNode { + s.Port = &v + return s +} + // This exception is thrown when an error occurs in the DataSync service. type InternalException struct { _ struct{} `type:"structure"` @@ -7354,7 +8121,7 @@ type NfsMountOptions struct { // that allows for asynchronous writes on the server. // // * NFSv4.0 (https://tools.ietf.org/html/rfc3530) - stateful, firewall-friendly - // protocol version that supports delegations and pseudo filesystems. + // protocol version that supports delegations and pseudo file systems. // // * NFSv4.1 (https://tools.ietf.org/html/rfc5661) - stateful protocol version // that supports sessions, directory delegations, and parallel data processing. @@ -7837,6 +8604,53 @@ func (s *PrivateLinkConfig) SetVpcEndpointId(v string) *PrivateLinkConfig { return s } +// The Quality of Protection (QOP) configuration specifies the Remote Procedure +// Call (RPC) and data transfer privacy settings configured on the Hadoop Distributed +// File System (HDFS) cluster. +type QopConfiguration struct { + _ struct{} `type:"structure"` + + // The data transfer protection setting configured on the HDFS cluster. This + // setting corresponds to your dfs.data.transfer.protection setting in the hdfs-site.xml + // file on your Hadoop cluster. + DataTransferProtection *string `type:"string" enum:"HdfsDataTransferProtection"` + + // The RPC protection setting configured on the HDFS cluster. This setting corresponds + // to your hadoop.rpc.protection setting in your core-site.xml file on your + // Hadoop cluster. + RpcProtection *string `type:"string" enum:"HdfsRpcProtection"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QopConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QopConfiguration) GoString() string { + return s.String() +} + +// SetDataTransferProtection sets the DataTransferProtection field's value. +func (s *QopConfiguration) SetDataTransferProtection(v string) *QopConfiguration { + s.DataTransferProtection = &v + return s +} + +// SetRpcProtection sets the RpcProtection field's value. +func (s *QopConfiguration) SetRpcProtection(v string) *QopConfiguration { + s.RpcProtection = &v + return s +} + // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) // role that is used to access an Amazon S3 bucket. // @@ -7931,15 +8745,15 @@ type StartTaskExecutionInput struct { _ struct{} `type:"structure"` // A list of filter rules that determines which files to exclude from a task. - // The list should contain a single filter string that consists of the patterns - // to exclude. The patterns are delimited by "|" (that is, a pipe), for example, + // The list contains a single filter string that consists of the patterns to + // exclude. The patterns are delimited by "|" (that is, a pipe), for example, // "/folder1|/folder2". Excludes []*FilterRule `type:"list"` // A list of filter rules that determines which files to include when running // a task. The pattern should contain a single filter string that consists of - // the patterns to include. The patterns are delimited by "|" (that is, a pipe). - // For example: "/folder1|/folder2" + // the patterns to include. The patterns are delimited by "|" (that is, a pipe), + // for example, "/folder1|/folder2". Includes []*FilterRule `type:"list"` // Represents the options that are available to control the behavior of a StartTaskExecution @@ -8053,8 +8867,9 @@ func (s *StartTaskExecutionOutput) SetTaskExecutionArn(v string) *StartTaskExecu return s } -// Represents a single entry in a list of AWS resource tags. TagListEntry returns -// an array that contains a list of tasks when the ListTagsForResource (https://docs.aws.amazon.com/datasync/latest/userguide/API_ListTagsForResource.html) +// Represents a single entry in a list of Amazon Web Services resource tags. +// TagListEntry returns an array that contains a list of tasks when the ListTagsForResource +// (https://docs.aws.amazon.com/datasync/latest/userguide/API_ListTagsForResource.html) // operation is called. type TagListEntry struct { _ struct{} `type:"structure"` @@ -8714,6 +9529,228 @@ func (s UpdateAgentOutput) GoString() string { return s.String() } +type UpdateLocationHdfsInput struct { + _ struct{} `type:"structure"` + + // The ARNs of the agents that are used to connect to the HDFS cluster. + AgentArns []*string `min:"1" type:"list"` + + // The type of authentication used to determine the identity of the user. + AuthenticationType *string `type:"string" enum:"HdfsAuthenticationType"` + + // The size of the data blocks to write into the HDFS cluster. 
+ BlockSize *int64 `min:"1.048576e+06" type:"integer"` + + // The Kerberos key table (keytab) that contains mappings between the defined + // Kerberos principal and the encrypted keys. You can load the keytab from a + // file by providing the file's address. If you use the AWS CLI, it performs + // base64 encoding for you. Otherwise, provide the base64-encoded text. + // KerberosKeytab is automatically base64 encoded/decoded by the SDK. + KerberosKeytab []byte `type:"blob"` + + // The krb5.conf file that contains the Kerberos configuration information. + // You can load the krb5.conf file by providing the file's address. If you're + // using the AWS CLI, it performs the base64 encoding for you. Otherwise, provide + // the base64-encoded text. + // KerberosKrb5Conf is automatically base64 encoded/decoded by the SDK. + KerberosKrb5Conf []byte `type:"blob"` + + // The Kerberos principal with access to the files and folders on the HDFS cluster. + KerberosPrincipal *string `min:"1" type:"string"` + + // The URI of the HDFS cluster's Key Management Server (KMS). + KmsKeyProviderUri *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the source HDFS cluster location. + // + // LocationArn is a required field + LocationArn *string `type:"string" required:"true"` + + // The NameNode that manages the HDFS namespace. The NameNode performs operations + // such as opening, closing, and renaming files and directories. The NameNode + // contains the information to map blocks of data to the DataNodes. You can + // use only one NameNode. + NameNodes []*HdfsNameNode `min:"1" type:"list"` + + // The Quality of Protection (QOP) configuration specifies the Remote Procedure + // Call (RPC) and data transfer privacy settings configured on the Hadoop Distributed + // File System (HDFS) cluster. + QopConfiguration *QopConfiguration `type:"structure"` + + // The number of DataNodes to replicate the data to when writing to the HDFS + // cluster. + ReplicationFactor *int64 `min:"1" type:"integer"` + + // The user name used to identify the client on the host operating system. + SimpleUser *string `min:"1" type:"string"` + + // A subdirectory in the HDFS cluster. This subdirectory is used to read data + // from or write data to the HDFS cluster. + Subdirectory *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateLocationHdfsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateLocationHdfsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateLocationHdfsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateLocationHdfsInput"} + if s.AgentArns != nil && len(s.AgentArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AgentArns", 1)) + } + if s.BlockSize != nil && *s.BlockSize < 1.048576e+06 { + invalidParams.Add(request.NewErrParamMinValue("BlockSize", 1.048576e+06)) + } + if s.KerberosPrincipal != nil && len(*s.KerberosPrincipal) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KerberosPrincipal", 1)) + } + if s.KmsKeyProviderUri != nil && len(*s.KmsKeyProviderUri) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KmsKeyProviderUri", 1)) + } + if s.LocationArn == nil { + invalidParams.Add(request.NewErrParamRequired("LocationArn")) + } + if s.NameNodes != nil && len(s.NameNodes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NameNodes", 1)) + } + if s.ReplicationFactor != nil && *s.ReplicationFactor < 1 { + invalidParams.Add(request.NewErrParamMinValue("ReplicationFactor", 1)) + } + if s.SimpleUser != nil && len(*s.SimpleUser) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SimpleUser", 1)) + } + if s.NameNodes != nil { + for i, v := range s.NameNodes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NameNodes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAgentArns sets the AgentArns field's value. +func (s *UpdateLocationHdfsInput) SetAgentArns(v []*string) *UpdateLocationHdfsInput { + s.AgentArns = v + return s +} + +// SetAuthenticationType sets the AuthenticationType field's value. +func (s *UpdateLocationHdfsInput) SetAuthenticationType(v string) *UpdateLocationHdfsInput { + s.AuthenticationType = &v + return s +} + +// SetBlockSize sets the BlockSize field's value. +func (s *UpdateLocationHdfsInput) SetBlockSize(v int64) *UpdateLocationHdfsInput { + s.BlockSize = &v + return s +} + +// SetKerberosKeytab sets the KerberosKeytab field's value. +func (s *UpdateLocationHdfsInput) SetKerberosKeytab(v []byte) *UpdateLocationHdfsInput { + s.KerberosKeytab = v + return s +} + +// SetKerberosKrb5Conf sets the KerberosKrb5Conf field's value. +func (s *UpdateLocationHdfsInput) SetKerberosKrb5Conf(v []byte) *UpdateLocationHdfsInput { + s.KerberosKrb5Conf = v + return s +} + +// SetKerberosPrincipal sets the KerberosPrincipal field's value. +func (s *UpdateLocationHdfsInput) SetKerberosPrincipal(v string) *UpdateLocationHdfsInput { + s.KerberosPrincipal = &v + return s +} + +// SetKmsKeyProviderUri sets the KmsKeyProviderUri field's value. +func (s *UpdateLocationHdfsInput) SetKmsKeyProviderUri(v string) *UpdateLocationHdfsInput { + s.KmsKeyProviderUri = &v + return s +} + +// SetLocationArn sets the LocationArn field's value. +func (s *UpdateLocationHdfsInput) SetLocationArn(v string) *UpdateLocationHdfsInput { + s.LocationArn = &v + return s +} + +// SetNameNodes sets the NameNodes field's value. +func (s *UpdateLocationHdfsInput) SetNameNodes(v []*HdfsNameNode) *UpdateLocationHdfsInput { + s.NameNodes = v + return s +} + +// SetQopConfiguration sets the QopConfiguration field's value. +func (s *UpdateLocationHdfsInput) SetQopConfiguration(v *QopConfiguration) *UpdateLocationHdfsInput { + s.QopConfiguration = v + return s +} + +// SetReplicationFactor sets the ReplicationFactor field's value. 
+func (s *UpdateLocationHdfsInput) SetReplicationFactor(v int64) *UpdateLocationHdfsInput { + s.ReplicationFactor = &v + return s +} + +// SetSimpleUser sets the SimpleUser field's value. +func (s *UpdateLocationHdfsInput) SetSimpleUser(v string) *UpdateLocationHdfsInput { + s.SimpleUser = &v + return s +} + +// SetSubdirectory sets the Subdirectory field's value. +func (s *UpdateLocationHdfsInput) SetSubdirectory(v string) *UpdateLocationHdfsInput { + s.Subdirectory = &v + return s +} + +type UpdateLocationHdfsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateLocationHdfsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateLocationHdfsOutput) GoString() string { + return s.String() +} + type UpdateLocationNfsInput struct { _ struct{} `type:"structure"` @@ -9245,19 +10282,20 @@ func (s UpdateTaskExecutionOutput) GoString() string { type UpdateTaskInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the resource name of the CloudWatch LogGroup. + // The Amazon Resource Name (ARN) of the resource name of the Amazon CloudWatch + // log group. CloudWatchLogGroupArn *string `type:"string"` // A list of filter rules that determines which files to exclude from a task. // The list should contain a single filter string that consists of the patterns - // to exclude. The patterns are delimited by "|" (that is, a pipe), for example: - // "/folder1|/folder2" + // to exclude. The patterns are delimited by "|" (that is, a pipe), for example, + // "/folder1|/folder2". Excludes []*FilterRule `type:"list"` // A list of filter rules that determines which files to include when running - // a task. The pattern should contain a single filter string that consists of - // the patterns to include. The patterns are delimited by "|" (that is, a pipe). - // For example: "/folder1|/folder2" + // a task. The pattern contains a single filter string that consists of the + // patterns to include. The patterns are delimited by "|" (that is, a pipe), + // for example, "/folder1|/folder2". Includes []*FilterRule `type:"list"` // The name of the task to update. 
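
The hunks above add the `UpdateLocationHdfsInput` type and its fluent setters to the generated DataSync client. As a rough illustration of how the new operation could be invoked from application code with this SDK version — the ARNs, NameNode endpoint, and user below are placeholders, not values from this change — a minimal sketch might look like this:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/datasync"
)

func main() {
	// Shared-config session; region and credentials come from the environment.
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	}))
	svc := datasync.New(sess)

	// Update an existing HDFS location to use simple authentication.
	// All ARNs, the hostname, and the port are illustrative placeholders.
	out, err := svc.UpdateLocationHdfs(&datasync.UpdateLocationHdfsInput{
		LocationArn:        aws.String("arn:aws:datasync:us-east-1:111122223333:location/loc-EXAMPLE"),
		AgentArns:          []*string{aws.String("arn:aws:datasync:us-east-1:111122223333:agent/agent-EXAMPLE")},
		AuthenticationType: aws.String(datasync.HdfsAuthenticationTypeSimple),
		SimpleUser:         aws.String("hdfs"),
		NameNodes: []*datasync.HdfsNameNode{{
			Hostname: aws.String("namenode.example.com"),
			Port:     aws.Int64(8020),
		}},
		ReplicationFactor: aws.Int64(3),
		Subdirectory:      aws.String("/exports"),
	})
	if err != nil {
		log.Fatalf("UpdateLocationHdfs failed: %v", err)
	}
	// UpdateLocationHdfsOutput carries no members; printing it just confirms success.
	fmt.Println(out)
}
```

The same request can be assembled with the chained setters shown in the hunks above (SetLocationArn, SetNameNodes, SetAuthenticationType, and so on), which each return the input pointer for fluent use.
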
@@ -9485,6 +10523,70 @@ func Gid_Values() []string { } } +const ( + // HdfsAuthenticationTypeSimple is a HdfsAuthenticationType enum value + HdfsAuthenticationTypeSimple = "SIMPLE" + + // HdfsAuthenticationTypeKerberos is a HdfsAuthenticationType enum value + HdfsAuthenticationTypeKerberos = "KERBEROS" +) + +// HdfsAuthenticationType_Values returns all elements of the HdfsAuthenticationType enum +func HdfsAuthenticationType_Values() []string { + return []string{ + HdfsAuthenticationTypeSimple, + HdfsAuthenticationTypeKerberos, + } +} + +const ( + // HdfsDataTransferProtectionDisabled is a HdfsDataTransferProtection enum value + HdfsDataTransferProtectionDisabled = "DISABLED" + + // HdfsDataTransferProtectionAuthentication is a HdfsDataTransferProtection enum value + HdfsDataTransferProtectionAuthentication = "AUTHENTICATION" + + // HdfsDataTransferProtectionIntegrity is a HdfsDataTransferProtection enum value + HdfsDataTransferProtectionIntegrity = "INTEGRITY" + + // HdfsDataTransferProtectionPrivacy is a HdfsDataTransferProtection enum value + HdfsDataTransferProtectionPrivacy = "PRIVACY" +) + +// HdfsDataTransferProtection_Values returns all elements of the HdfsDataTransferProtection enum +func HdfsDataTransferProtection_Values() []string { + return []string{ + HdfsDataTransferProtectionDisabled, + HdfsDataTransferProtectionAuthentication, + HdfsDataTransferProtectionIntegrity, + HdfsDataTransferProtectionPrivacy, + } +} + +const ( + // HdfsRpcProtectionDisabled is a HdfsRpcProtection enum value + HdfsRpcProtectionDisabled = "DISABLED" + + // HdfsRpcProtectionAuthentication is a HdfsRpcProtection enum value + HdfsRpcProtectionAuthentication = "AUTHENTICATION" + + // HdfsRpcProtectionIntegrity is a HdfsRpcProtection enum value + HdfsRpcProtectionIntegrity = "INTEGRITY" + + // HdfsRpcProtectionPrivacy is a HdfsRpcProtection enum value + HdfsRpcProtectionPrivacy = "PRIVACY" +) + +// HdfsRpcProtection_Values returns all elements of the HdfsRpcProtection enum +func HdfsRpcProtection_Values() []string { + return []string{ + HdfsRpcProtectionDisabled, + HdfsRpcProtectionAuthentication, + HdfsRpcProtectionIntegrity, + HdfsRpcProtectionPrivacy, + } +} + const ( // LocationFilterNameLocationUri is a LocationFilterName enum value LocationFilterNameLocationUri = "LocationUri" diff --git a/service/datasync/datasynciface/interface.go b/service/datasync/datasynciface/interface.go index a9993a0f760..0804f822f9a 100644 --- a/service/datasync/datasynciface/interface.go +++ b/service/datasync/datasynciface/interface.go @@ -76,6 +76,10 @@ type DataSyncAPI interface { CreateLocationFsxWindowsWithContext(aws.Context, *datasync.CreateLocationFsxWindowsInput, ...request.Option) (*datasync.CreateLocationFsxWindowsOutput, error) CreateLocationFsxWindowsRequest(*datasync.CreateLocationFsxWindowsInput) (*request.Request, *datasync.CreateLocationFsxWindowsOutput) + CreateLocationHdfs(*datasync.CreateLocationHdfsInput) (*datasync.CreateLocationHdfsOutput, error) + CreateLocationHdfsWithContext(aws.Context, *datasync.CreateLocationHdfsInput, ...request.Option) (*datasync.CreateLocationHdfsOutput, error) + CreateLocationHdfsRequest(*datasync.CreateLocationHdfsInput) (*request.Request, *datasync.CreateLocationHdfsOutput) + CreateLocationNfs(*datasync.CreateLocationNfsInput) (*datasync.CreateLocationNfsOutput, error) CreateLocationNfsWithContext(aws.Context, *datasync.CreateLocationNfsInput, ...request.Option) (*datasync.CreateLocationNfsOutput, error) CreateLocationNfsRequest(*datasync.CreateLocationNfsInput) 
(*request.Request, *datasync.CreateLocationNfsOutput) @@ -120,6 +124,10 @@ type DataSyncAPI interface { DescribeLocationFsxWindowsWithContext(aws.Context, *datasync.DescribeLocationFsxWindowsInput, ...request.Option) (*datasync.DescribeLocationFsxWindowsOutput, error) DescribeLocationFsxWindowsRequest(*datasync.DescribeLocationFsxWindowsInput) (*request.Request, *datasync.DescribeLocationFsxWindowsOutput) + DescribeLocationHdfs(*datasync.DescribeLocationHdfsInput) (*datasync.DescribeLocationHdfsOutput, error) + DescribeLocationHdfsWithContext(aws.Context, *datasync.DescribeLocationHdfsInput, ...request.Option) (*datasync.DescribeLocationHdfsOutput, error) + DescribeLocationHdfsRequest(*datasync.DescribeLocationHdfsInput) (*request.Request, *datasync.DescribeLocationHdfsOutput) + DescribeLocationNfs(*datasync.DescribeLocationNfsInput) (*datasync.DescribeLocationNfsOutput, error) DescribeLocationNfsWithContext(aws.Context, *datasync.DescribeLocationNfsInput, ...request.Option) (*datasync.DescribeLocationNfsOutput, error) DescribeLocationNfsRequest(*datasync.DescribeLocationNfsInput) (*request.Request, *datasync.DescribeLocationNfsOutput) @@ -195,6 +203,10 @@ type DataSyncAPI interface { UpdateAgentWithContext(aws.Context, *datasync.UpdateAgentInput, ...request.Option) (*datasync.UpdateAgentOutput, error) UpdateAgentRequest(*datasync.UpdateAgentInput) (*request.Request, *datasync.UpdateAgentOutput) + UpdateLocationHdfs(*datasync.UpdateLocationHdfsInput) (*datasync.UpdateLocationHdfsOutput, error) + UpdateLocationHdfsWithContext(aws.Context, *datasync.UpdateLocationHdfsInput, ...request.Option) (*datasync.UpdateLocationHdfsOutput, error) + UpdateLocationHdfsRequest(*datasync.UpdateLocationHdfsInput) (*request.Request, *datasync.UpdateLocationHdfsOutput) + UpdateLocationNfs(*datasync.UpdateLocationNfsInput) (*datasync.UpdateLocationNfsOutput, error) UpdateLocationNfsWithContext(aws.Context, *datasync.UpdateLocationNfsInput, ...request.Option) (*datasync.UpdateLocationNfsOutput, error) UpdateLocationNfsRequest(*datasync.UpdateLocationNfsInput) (*request.Request, *datasync.UpdateLocationNfsOutput) diff --git a/service/finspace/api.go b/service/finspace/api.go index 202a09fdc9f..9fed5ed5c63 100644 --- a/service/finspace/api.go +++ b/service/finspace/api.go @@ -793,6 +793,16 @@ func (s *AccessDeniedException) RequestID() string { type CreateEnvironmentInput struct { _ struct{} `type:"structure"` + // The list of Amazon Resource Names (ARN) of the data bundles to install. Currently + // supported data bundle ARNs: + // + // * arn:aws:finspace:${Region}::data-bundle/capital-markets-sample - Contains + // sample Capital Markets datasets, categories and controlled vocabularies. + // + // * arn:aws:finspace:${Region}::data-bundle/taq (default) - Contains trades + // and quotes data in addition to sample Capital Markets data. + DataBundles []*string `locationName:"dataBundles" type:"list"` + // The description of the FinSpace environment to be created. Description *string `locationName:"description" min:"1" type:"string"` @@ -816,6 +826,9 @@ type CreateEnvironmentInput struct { // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` + // Configuration information for the superuser. + SuperuserParameters *SuperuserParameters `locationName:"superuserParameters" type:"structure"` + // Add tags to your FinSpace environment. 
Tags map[string]*string `locationName:"tags" min:"1" type:"map"` } @@ -861,6 +874,11 @@ func (s *CreateEnvironmentInput) Validate() error { invalidParams.AddNested("FederationParameters", err.(request.ErrInvalidParams)) } } + if s.SuperuserParameters != nil { + if err := s.SuperuserParameters.Validate(); err != nil { + invalidParams.AddNested("SuperuserParameters", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -868,6 +886,12 @@ func (s *CreateEnvironmentInput) Validate() error { return nil } +// SetDataBundles sets the DataBundles field's value. +func (s *CreateEnvironmentInput) SetDataBundles(v []*string) *CreateEnvironmentInput { + s.DataBundles = v + return s +} + // SetDescription sets the Description field's value. func (s *CreateEnvironmentInput) SetDescription(v string) *CreateEnvironmentInput { s.Description = &v @@ -898,6 +922,12 @@ func (s *CreateEnvironmentInput) SetName(v string) *CreateEnvironmentInput { return s } +// SetSuperuserParameters sets the SuperuserParameters field's value. +func (s *CreateEnvironmentInput) SetSuperuserParameters(v *SuperuserParameters) *CreateEnvironmentInput { + s.SuperuserParameters = v + return s +} + // SetTags sets the Tags field's value. func (s *CreateEnvironmentInput) SetTags(v map[string]*string) *CreateEnvironmentInput { s.Tags = v @@ -1843,6 +1873,94 @@ func (s *ServiceQuotaExceededException) RequestID() string { return s.RespMetadata.RequestID } +// Configuration information for the superuser. +type SuperuserParameters struct { + _ struct{} `type:"structure"` + + // The email address of the superuser. + // + // EmailAddress is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SuperuserParameters's + // String and GoString methods. + // + // EmailAddress is a required field + EmailAddress *string `locationName:"emailAddress" min:"1" type:"string" required:"true" sensitive:"true"` + + // The first name of the superuser. + // + // FirstName is a required field + FirstName *string `locationName:"firstName" min:"1" type:"string" required:"true"` + + // The last name of the superuser. + // + // LastName is a required field + LastName *string `locationName:"lastName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SuperuserParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SuperuserParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SuperuserParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SuperuserParameters"} + if s.EmailAddress == nil { + invalidParams.Add(request.NewErrParamRequired("EmailAddress")) + } + if s.EmailAddress != nil && len(*s.EmailAddress) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EmailAddress", 1)) + } + if s.FirstName == nil { + invalidParams.Add(request.NewErrParamRequired("FirstName")) + } + if s.FirstName != nil && len(*s.FirstName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FirstName", 1)) + } + if s.LastName == nil { + invalidParams.Add(request.NewErrParamRequired("LastName")) + } + if s.LastName != nil && len(*s.LastName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LastName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *SuperuserParameters) SetEmailAddress(v string) *SuperuserParameters { + s.EmailAddress = &v + return s +} + +// SetFirstName sets the FirstName field's value. +func (s *SuperuserParameters) SetFirstName(v string) *SuperuserParameters { + s.FirstName = &v + return s +} + +// SetLastName sets the LastName field's value. +func (s *SuperuserParameters) SetLastName(v string) *SuperuserParameters { + s.LastName = &v + return s +} + type TagResourceInput struct { _ struct{} `type:"structure"` diff --git a/service/finspace/doc.go b/service/finspace/doc.go index e8659632472..d642869c85b 100644 --- a/service/finspace/doc.go +++ b/service/finspace/doc.go @@ -3,8 +3,7 @@ // Package finspace provides the client and types for making API // requests to FinSpace User Environment Management service. // -// The FinSpace management service provides the APIs for managing the FinSpace -// environments. +// The FinSpace management service provides the APIs for managing FinSpace environments. // // See https://docs.aws.amazon.com/goto/WebAPI/finspace-2021-03-12 for more information on this service. // diff --git a/service/macie2/api.go b/service/macie2/api.go index 97a47b262f7..14a61b2b9b3 100644 --- a/service/macie2/api.go +++ b/service/macie2/api.go @@ -6286,7 +6286,8 @@ func (c *Macie2) UpdateMemberSessionRequest(input *UpdateMemberSessionInput) (re // UpdateMemberSession API operation for Amazon Macie 2. // -// Enables an Amazon Macie administrator to suspend or re-enable a member account. +// Enables an Amazon Macie administrator to suspend or re-enable Macie for a +// member account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7538,9 +7539,10 @@ func (s *BucketLevelPermissions) SetBucketPolicy(v *BucketPolicy) *BucketLevelPe // Provides statistical data and other information about an S3 bucket that Amazon // Macie monitors and analyzes for your account. If an error occurs when Macie // attempts to retrieve and process information about the bucket or the bucket's -// objects, the value for most of these properties is null. Exceptions are accountId, -// bucketArn, bucketCreatedAt, bucketName, lastUpdated, and region. To identify -// the cause of the error, refer to the errorCode and errorMessage values. +// objects, the value for the versioning property is false and the value for +// most other properties is null. Exceptions are accountId, bucketArn, bucketCreatedAt, +// bucketName, lastUpdated, and region. 
To identify the cause of the error, +// refer to the errorCode and errorMessage values. type BucketMetadata struct { _ struct{} `type:"structure"` @@ -8542,10 +8544,10 @@ func (s *CreateClassificationJobOutput) SetJobId(v string) *CreateClassification return s } -// Specifies the criteria and other settings for a custom data identifier. You -// can't change a custom data identifier after you create it. This helps ensure -// that you have an immutable history of sensitive data findings and discovery -// results for data privacy and protection audits or investigations. +// Specifies the detection criteria and other settings for a custom data identifier. +// You can't change a custom data identifier after you create it. This helps +// ensure that you have an immutable history of sensitive data findings and +// discovery results for data privacy and protection audits or investigations. type CreateCustomDataIdentifierInput struct { _ struct{} `type:"structure"` @@ -8563,6 +8565,21 @@ type CreateCustomDataIdentifierInput struct { Regex *string `locationName:"regex" type:"string"` + // The severity to assign to findings that the custom data identifier produces, + // based on the number of occurrences of text that matches the custom data identifier's + // detection criteria. You can specify as many as three SeverityLevel objects + // in this array, one for each severity: LOW, MEDIUM, or HIGH. If you specify + // more than one, the occurrences thresholds must be in ascending order by severity, + // moving from LOW to HIGH. For example, 1 for LOW, 50 for MEDIUM, and 100 for + // HIGH. If an S3 object contains fewer occurrences than the lowest specified + // threshold, Amazon Macie doesn't create a finding. + // + // If you don't specify any values for this array, Macie creates findings for + // S3 objects that contain at least one occurrence of text that matches the + // detection criteria, and Macie automatically assigns the MEDIUM severity to + // those findings. + SeverityLevels []*SeverityLevel `locationName:"severityLevels" type:"list"` + // A string-to-string map of key-value pairs that specifies the tags (keys and // values) for a classification job, custom data identifier, findings filter, // or member account. @@ -8587,6 +8604,26 @@ func (s CreateCustomDataIdentifierInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCustomDataIdentifierInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCustomDataIdentifierInput"} + if s.SeverityLevels != nil { + for i, v := range s.SeverityLevels { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SeverityLevels", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetClientToken sets the ClientToken field's value. func (s *CreateCustomDataIdentifierInput) SetClientToken(v string) *CreateCustomDataIdentifierInput { s.ClientToken = &v @@ -8629,6 +8666,12 @@ func (s *CreateCustomDataIdentifierInput) SetRegex(v string) *CreateCustomDataId return s } +// SetSeverityLevels sets the SeverityLevels field's value. +func (s *CreateCustomDataIdentifierInput) SetSeverityLevels(v []*SeverityLevel) *CreateCustomDataIdentifierInput { + s.SeverityLevels = v + return s +} + // SetTags sets the Tags field's value. 
func (s *CreateCustomDataIdentifierInput) SetTags(v map[string]*string) *CreateCustomDataIdentifierInput { s.Tags = v @@ -9008,8 +9051,7 @@ func (s *CreateMemberOutput) SetArn(v string) *CreateMemberOutput { return s } -// Specifies the types of findings to include in a set of sample findings that -// Amazon Macie creates. +// Specifies the types of sample findings to create. type CreateSampleFindingsInput struct { _ struct{} `type:"structure"` @@ -11532,8 +11574,8 @@ func (s *GetCustomDataIdentifierInput) SetId(v string) *GetCustomDataIdentifierI return s } -// Provides information about the criteria and other settings for a custom data -// identifier. +// Provides information about the detection criteria and other settings for +// a custom data identifier. type GetCustomDataIdentifierOutput struct { _ struct{} `type:"structure"` @@ -11557,6 +11599,21 @@ type GetCustomDataIdentifierOutput struct { Regex *string `locationName:"regex" type:"string"` + // The severity to assign to findings that the custom data identifier produces, + // based on the number of occurrences of text that matches the custom data identifier's + // detection criteria. You can specify as many as three SeverityLevel objects + // in this array, one for each severity: LOW, MEDIUM, or HIGH. If you specify + // more than one, the occurrences thresholds must be in ascending order by severity, + // moving from LOW to HIGH. For example, 1 for LOW, 50 for MEDIUM, and 100 for + // HIGH. If an S3 object contains fewer occurrences than the lowest specified + // threshold, Amazon Macie doesn't create a finding. + // + // If you don't specify any values for this array, Macie creates findings for + // S3 objects that contain at least one occurrence of text that matches the + // detection criteria, and Macie automatically assigns the MEDIUM severity to + // those findings. + SeverityLevels []*SeverityLevel `locationName:"severityLevels" type:"list"` + // A string-to-string map of key-value pairs that specifies the tags (keys and // values) for a classification job, custom data identifier, findings filter, // or member account. @@ -11641,6 +11698,12 @@ func (s *GetCustomDataIdentifierOutput) SetRegex(v string) *GetCustomDataIdentif return s } +// SetSeverityLevels sets the SeverityLevels field's value. +func (s *GetCustomDataIdentifierOutput) SetSeverityLevels(v []*SeverityLevel) *GetCustomDataIdentifierOutput { + s.SeverityLevels = v + return s +} + // SetTags sets the Tags field's value. func (s *GetCustomDataIdentifierOutput) SetTags(v map[string]*string) *GetCustomDataIdentifierOutput { s.Tags = v @@ -16820,6 +16883,69 @@ func (s *Severity) SetScore(v int64) *Severity { return s } +// Specifies a severity level for findings that a custom data identifier produces. +// A severity level determines which severity is assigned to the findings, based +// on the number of occurrences of text that matches the custom data identifier's +// detection criteria. +type SeverityLevel struct { + _ struct{} `type:"structure"` + + // OccurrencesThreshold is a required field + OccurrencesThreshold *int64 `locationName:"occurrencesThreshold" type:"long" required:"true"` + + // The severity of a finding, ranging from LOW, for least severe, to HIGH, for + // most severe. Valid values are: + // + // Severity is a required field + Severity *string `locationName:"severity" type:"string" required:"true" enum:"DataIdentifierSeverity"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SeverityLevel) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SeverityLevel) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SeverityLevel) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SeverityLevel"} + if s.OccurrencesThreshold == nil { + invalidParams.Add(request.NewErrParamRequired("OccurrencesThreshold")) + } + if s.Severity == nil { + invalidParams.Add(request.NewErrParamRequired("Severity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOccurrencesThreshold sets the OccurrencesThreshold field's value. +func (s *SeverityLevel) SetOccurrencesThreshold(v int64) *SeverityLevel { + s.OccurrencesThreshold = &v + return s +} + +// SetSeverity sets the Severity field's value. +func (s *SeverityLevel) SetSeverity(v string) *SeverityLevel { + s.Severity = &v + return s +} + // Specifies a property-based condition that determines whether an S3 bucket // is included or excluded from a classification job. type SimpleCriterionForJob struct { @@ -17874,7 +18000,7 @@ func (s UpdateMacieSessionOutput) GoString() string { return s.String() } -// Suspends (pauses) or re-enables an Amazon Macie member account. +// Suspends (pauses) or re-enables Amazon Macie for a member account. type UpdateMemberSessionInput struct { _ struct{} `type:"structure"` @@ -18645,6 +18771,28 @@ func Currency_Values() []string { } } +// The severity of a finding, ranging from LOW, for least severe, to HIGH, for +// most severe. Valid values are: +const ( + // DataIdentifierSeverityLow is a DataIdentifierSeverity enum value + DataIdentifierSeverityLow = "LOW" + + // DataIdentifierSeverityMedium is a DataIdentifierSeverity enum value + DataIdentifierSeverityMedium = "MEDIUM" + + // DataIdentifierSeverityHigh is a DataIdentifierSeverity enum value + DataIdentifierSeverityHigh = "HIGH" +) + +// DataIdentifierSeverity_Values returns all elements of the DataIdentifierSeverity enum +func DataIdentifierSeverity_Values() []string { + return []string{ + DataIdentifierSeverityLow, + DataIdentifierSeverityMedium, + DataIdentifierSeverityHigh, + } +} + const ( // DayOfWeekSunday is a DayOfWeek enum value DayOfWeekSunday = "SUNDAY"