diff --git a/CHANGELOG.md b/CHANGELOG.md index b795b9f742e..b23536f2288 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +Release v1.54.17 (2024-07-09) === + +### Service Client Updates +* `service/datazone`: Updates service API +* `service/fsx`: Updates service API and documentation +* `service/opensearch`: Updates service API and documentation +* `service/sagemaker`: Updates service API, documentation, and paginators + * This release 1/ enables optimization jobs that allow customers to perform ahead-of-time compilation and quantization. 2/ allows customers to control access to Amazon Q integration in SageMaker Studio. 3/ enables AdditionalModelDataSources for the CreateModel action. + Release v1.54.16 (2024-07-08) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 33e8062ddc0..27446c59eb1 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -46674,40 +46674,20 @@ var awsisoPartition = partition{ "redshift": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "fips-us-iso-east-1", + Region: "us-iso-east-1", }: endpoint{ - Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "redshift.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, - Deprecated: boxedTrue, }, endpointKey{ - Region: "fips-us-iso-west-1", + Region: "us-iso-west-1", }: endpoint{ - Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "redshift.us-iso-west-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-west-1", }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", }, }, }, @@ -47683,22 +47663,12 @@ var awsisobPartition = partition{ "redshift": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "fips-us-isob-east-1", + Region: "us-isob-east-1", }: endpoint{ - Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "redshift.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", }, }, }, diff --git a/aws/version.go b/aws/version.go index c8a68cc761a..a6b3fe4364a 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.54.16" +const SDKVersion = "1.54.17" diff --git a/models/apis/datazone/2018-05-10/api-2.json b/models/apis/datazone/2018-05-10/api-2.json index d8a12046e55..3d6b52272a1 100644 --- a/models/apis/datazone/2018-05-10/api-2.json +++ b/models/apis/datazone/2018-05-10/api-2.json @@ -3698,11 +3698,15 @@ "members":{ "domainId":{"shape":"DomainId"}, "itemId":{"shape":"DataProductId"} - } + }, + "deprecated":true, + "deprecatedMessage":"This structure is deprecated."
}, "DataProductItems":{ "type":"list", "member":{"shape":"DataProductItem"}, + "deprecated":true, + "deprecatedMessage":"This structure is deprecated.", "max":100, "min":0 }, @@ -3732,7 +3736,9 @@ "owningProjectId":{"shape":"ProjectId"}, "updatedAt":{"shape":"UpdatedAt"}, "updatedBy":{"shape":"UpdatedBy"} - } + }, + "deprecated":true, + "deprecatedMessage":"This structure is deprecated." }, "DataSourceConfigurationInput":{ "type":"structure", @@ -8293,7 +8299,11 @@ "type":"structure", "members":{ "assetItem":{"shape":"AssetItem"}, - "dataProductItem":{"shape":"DataProductSummary"}, + "dataProductItem":{ + "shape":"DataProductSummary", + "deprecated":true, + "deprecatedMessage":"This field is deprecated." + }, "glossaryItem":{"shape":"GlossaryItem"}, "glossaryTermItem":{"shape":"GlossaryTermItem"} }, diff --git a/models/apis/fsx/2018-03-01/api-2.json b/models/apis/fsx/2018-03-01/api-2.json index 558fa1a7668..6f7d226ebc2 100644 --- a/models/apis/fsx/2018-03-01/api-2.json +++ b/models/apis/fsx/2018-03-01/api-2.json @@ -11,7 +11,8 @@ "signatureVersion":"v4", "signingName":"fsx", "targetPrefix":"AWSSimbaAPIService_v20180301", - "uid":"fsx-2018-03-01" + "uid":"fsx-2018-03-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateFileSystemAliases":{ @@ -846,7 +847,8 @@ "STORAGE_TYPE_OPTIMIZATION", "MISCONFIGURED_STATE_RECOVERY", "VOLUME_UPDATE_WITH_SNAPSHOT", - "VOLUME_INITIALIZE_WITH_SNAPSHOT" + "VOLUME_INITIALIZE_WITH_SNAPSHOT", + "DOWNLOAD_DATA_FROM_BACKUP" ] }, "AdministrativeActions":{ @@ -2978,7 +2980,8 @@ "enum":[ "MULTI_AZ_1", "SINGLE_AZ_1", - "SINGLE_AZ_2" + "SINGLE_AZ_2", + "MULTI_AZ_2" ] }, "OntapEndpointIpAddresses":{ @@ -3088,6 +3091,8 @@ "enum":[ "SINGLE_AZ_1", "SINGLE_AZ_2", + "SINGLE_AZ_HA_1", + "SINGLE_AZ_HA_2", "MULTI_AZ_1" ] }, @@ -3614,7 +3619,8 @@ "IN_PROGRESS", "PENDING", "COMPLETED", - "UPDATED_OPTIMIZING" + "UPDATED_OPTIMIZING", + "OPTIMIZING" ] }, "StorageCapacity":{ @@ -3971,7 +3977,8 @@ "ThroughputCapacity":{"shape":"MegabytesPerSecond"}, "AddRouteTableIds":{"shape":"RouteTableIds"}, "RemoveRouteTableIds":{"shape":"RouteTableIds"}, - "ThroughputCapacityPerHAPair":{"shape":"ThroughputCapacityPerHAPair"} + "ThroughputCapacityPerHAPair":{"shape":"ThroughputCapacityPerHAPair"}, + "HAPairs":{"shape":"HAPairs"} } }, "UpdateFileSystemOpenZFSConfiguration":{ diff --git a/models/apis/fsx/2018-03-01/docs-2.json b/models/apis/fsx/2018-03-01/docs-2.json index 1c12c0b2363..9cc606ce271 100644 --- a/models/apis/fsx/2018-03-01/docs-2.json +++ b/models/apis/fsx/2018-03-01/docs-2.json @@ -107,7 +107,7 @@ } }, "AdministrativeActionType": { - "base": "
Describes the type of administrative action, as follows:
FILE_SYSTEM_UPDATE
- A file system update administrative action initiated from the Amazon FSx console, API (UpdateFileSystem
), or CLI (update-file-system
).
THROUGHPUT_OPTIMIZATION
- After the FILE_SYSTEM_UPDATE
task to increase a file system's throughput capacity has been completed successfully, a THROUGHPUT_OPTIMIZATION
task starts.
You can track the storage-optimization progress using the ProgressPercent
property. When THROUGHPUT_OPTIMIZATION
has been completed successfully, the parent FILE_SYSTEM_UPDATE
action status changes to COMPLETED
. For more information, see Managing throughput capacity in the Amazon FSx for Windows File Server User Guide.
STORAGE_OPTIMIZATION
- After the FILE_SYSTEM_UPDATE
task to increase a file system's storage capacity has been completed successfully, a STORAGE_OPTIMIZATION
task starts.
For Windows and ONTAP, storage optimization is the process of migrating the file system data to newer larger disks.
For Lustre, storage optimization consists of rebalancing the data across the existing and newly added file servers.
You can track the storage-optimization progress using the ProgressPercent
property. When STORAGE_OPTIMIZATION
has been completed successfully, the parent FILE_SYSTEM_UPDATE
action status changes to COMPLETED
. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide, Managing storage capacity in the Amazon FSx for Lustre User Guide, and Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide.
FILE_SYSTEM_ALIAS_ASSOCIATION
- A file system update to associate a new Domain Name System (DNS) alias with the file system. For more information, see AssociateFileSystemAliases.
FILE_SYSTEM_ALIAS_DISASSOCIATION
- A file system update to disassociate a DNS alias from the file system. For more information, see DisassociateFileSystemAliases.
IOPS_OPTIMIZATION
- After the FILE_SYSTEM_UPDATE
task to increase a file system's throughput capacity has been completed successfully, an IOPS_OPTIMIZATION
task starts.
You can track the storage-optimization progress using the ProgressPercent
property. When IOPS_OPTIMIZATION
has been completed successfully, the parent FILE_SYSTEM_UPDATE
action status changes to COMPLETED
. For more information, see Managing provisioned SSD IOPS in the Amazon FSx for Windows File Server User Guide.
STORAGE_TYPE_OPTIMIZATION
- After the FILE_SYSTEM_UPDATE
task to increase a file system's throughput capacity has been completed successfully, a STORAGE_TYPE_OPTIMIZATION
task starts.
You can track the storage-optimization progress using the ProgressPercent
property. When STORAGE_TYPE_OPTIMIZATION
has been completed successfully, the parent FILE_SYSTEM_UPDATE
action status changes to COMPLETED
.
VOLUME_UPDATE
- A volume update to an Amazon FSx for OpenZFS volume initiated from the Amazon FSx console, API (UpdateVolume
), or CLI (update-volume
).
VOLUME_RESTORE
- An Amazon FSx for OpenZFS volume is returned to the state saved by the specified snapshot, initiated from an API (RestoreVolumeFromSnapshot
) or CLI (restore-volume-from-snapshot
).
SNAPSHOT_UPDATE
- A snapshot update to an Amazon FSx for OpenZFS volume initiated from the Amazon FSx console, API (UpdateSnapshot
), or CLI (update-snapshot
).
RELEASE_NFS_V3_LOCKS
- Tracks the release of Network File System (NFS) V3 locks on an Amazon FSx for OpenZFS file system.
VOLUME_INITIALIZE_WITH_SNAPSHOT
- A volume is being created from a snapshot on a different FSx for OpenZFS file system. You can initiate this from the Amazon FSx console, API (CreateVolume
), or CLI (create-volume
) when using the FULL_COPY
strategy.
VOLUME_UPDATE_WITH_SNAPSHOT
- A volume is being updated from a snapshot on a different FSx for OpenZFS file system. You can initiate this from the Amazon FSx console, API (CopySnapshotAndUpdateVolume
), or CLI (copy-snapshot-and-update-volume
).
Describes the type of administrative action, as follows:
FILE_SYSTEM_UPDATE
- A file system update administrative action initiated from the Amazon FSx console, API (UpdateFileSystem
), or CLI (update-file-system
).
THROUGHPUT_OPTIMIZATION
- After the FILE_SYSTEM_UPDATE
task to increase a file system's throughput capacity has been completed successfully, a THROUGHPUT_OPTIMIZATION
task starts.
You can track the storage-optimization progress using the ProgressPercent
property. When THROUGHPUT_OPTIMIZATION
has been completed successfully, the parent FILE_SYSTEM_UPDATE
action status changes to COMPLETED
. For more information, see Managing throughput capacity in the Amazon FSx for Windows File Server User Guide.
STORAGE_OPTIMIZATION
- After the FILE_SYSTEM_UPDATE
task to increase a file system's storage capacity has completed successfully, a STORAGE_OPTIMIZATION
task starts.
For Windows and ONTAP, storage optimization is the process of migrating the file system data to newer larger disks.
For Lustre, storage optimization consists of rebalancing the data across the existing and newly added file servers.
You can track the storage-optimization progress using the ProgressPercent
property. When STORAGE_OPTIMIZATION
has been completed successfully, the parent FILE_SYSTEM_UPDATE
action status changes to COMPLETED
. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide, Managing storage capacity in the Amazon FSx for Lustre User Guide, and Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide.
FILE_SYSTEM_ALIAS_ASSOCIATION
- A file system update to associate a new Domain Name System (DNS) alias with the file system. For more information, see AssociateFileSystemAliases.
FILE_SYSTEM_ALIAS_DISASSOCIATION
- A file system update to disassociate a DNS alias from the file system. For more information, see DisassociateFileSystemAliases.
IOPS_OPTIMIZATION
- After the FILE_SYSTEM_UPDATE
task to increase a file system's throughput capacity has been completed successfully, an IOPS_OPTIMIZATION
task starts.
You can track the storage-optimization progress using the ProgressPercent
property. When IOPS_OPTIMIZATION
has been completed successfully, the parent FILE_SYSTEM_UPDATE
action status changes to COMPLETED
. For more information, see Managing provisioned SSD IOPS in the Amazon FSx for Windows File Server User Guide.
STORAGE_TYPE_OPTIMIZATION
- After the FILE_SYSTEM_UPDATE
task to increase a file system's throughput capacity has been completed successfully, a STORAGE_TYPE_OPTIMIZATION
task starts.
You can track the storage-optimization progress using the ProgressPercent
property. When STORAGE_TYPE_OPTIMIZATION
has been completed successfully, the parent FILE_SYSTEM_UPDATE
action status changes to COMPLETED
.
VOLUME_UPDATE
- A volume update to an Amazon FSx for OpenZFS volume initiated from the Amazon FSx console, API (UpdateVolume
), or CLI (update-volume
).
VOLUME_RESTORE
- An Amazon FSx for OpenZFS volume is returned to the state saved by the specified snapshot, initiated from an API (RestoreVolumeFromSnapshot
) or CLI (restore-volume-from-snapshot
).
SNAPSHOT_UPDATE
- A snapshot update to an Amazon FSx for OpenZFS volume initiated from the Amazon FSx console, API (UpdateSnapshot
), or CLI (update-snapshot
).
RELEASE_NFS_V3_LOCKS
- Tracks the release of Network File System (NFS) V3 locks on an Amazon FSx for OpenZFS file system.
DOWNLOAD_DATA_FROM_BACKUP
- An FSx for ONTAP backup is being restored to a new volume on a second-generation file system. Once all of the file metadata is loaded onto the volume, you can mount the volume with read-only access during this process.
VOLUME_INITIALIZE_WITH_SNAPSHOT
- A volume is being created from a snapshot on a different FSx for OpenZFS file system. You can initiate this from the Amazon FSx console, API (CreateVolume
), or CLI (create-volume
) when using the FULL_COPY
strategy.
VOLUME_UPDATE_WITH_SNAPSHOT
- A volume is being updated from a snapshot on a different FSx for OpenZFS file system. You can initiate this from the Amazon FSx console, API (CopySnapshotAndUpdateVolume
), or CLI (copy-snapshot-and-update-volume
).
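These action types surface on each file system's AdministrativeActions list, so their progress can be watched from the Go SDK this diff updates. A minimal sketch against the v1 client follows; the region, file system ID, and output format are illustrative assumptions, not part of this change:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	client := fsx.New(sess)

	out, err := client.DescribeFileSystems(&fsx.DescribeFileSystemsInput{
		FileSystemIds: []*string{aws.String("fs-0123456789abcdef0")}, // placeholder ID
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, fs := range out.FileSystems {
		for _, action := range fs.AdministrativeActions {
			// ProgressPercent is only meaningful for progress-reporting types such as
			// STORAGE_OPTIMIZATION and the new DOWNLOAD_DATA_FROM_BACKUP.
			fmt.Printf("%s: status=%s progress=%d%%\n",
				aws.StringValue(action.AdministrativeActionType),
				aws.StringValue(action.Status),
				aws.Int64Value(action.ProgressPercent))
		}
	}
}
```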
The list of aggregates that this volume resides on. Aggregates are storage pools which make up your primary storage tier. Each high-availability (HA) pair has one aggregate. The names of the aggregates map to the names of the aggregates in the ONTAP CLI and REST API. For FlexVols, there will always be a single entry.
Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:
The strings in the value of Aggregates
are not formatted as aggrX
, where X is a number between 1 and 6.
The value of Aggregates
contains aggregates that are not present.
One or more of the aggregates supplied are too close to the volume limit to support adding more volumes.
The list of aggregates that this volume resides on. Aggregates are storage pools which make up your primary storage tier. Each high-availability (HA) pair has one aggregate. The names of the aggregates map to the names of the aggregates in the ONTAP CLI and REST API. For FlexVols, there will always be a single entry.
Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:
The strings in the value of Aggregates
are not formatted as aggrX
, where X is a number between 1 and 12.
The value of Aggregates
contains aggregates that are not present.
One or more of the aggregates supplied are too close to the volume limit to support adding more volumes.
Used to specify the names of aggregates on which the volume will be created.
" } }, @@ -1259,7 +1259,7 @@ "base": "A structure providing details of any failures that occurred.
", "refs": { "FileCache$FailureDetails": "A structure providing details of any failures that occurred.
", - "FileCacheCreating$FailureDetails": "A structure providing details of any failures that occurred.
" + "FileCacheCreating$FailureDetails": "A structure providing details of any failures that occurred in creating a cache.
" } }, "FileCacheId": { @@ -1549,8 +1549,9 @@ "HAPairs": { "base": null, "refs": { - "CreateFileSystemOntapConfiguration$HAPairs": "Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file systems are powered by up to 12 HA pairs. The value of this property affects the values of StorageCapacity
, Iops
, and ThroughputCapacity
. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide.
Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:
The value of HAPairs
is less than 1 or greater than 12.
The value of HAPairs
is greater than 1 and the value of DeploymentType
is SINGLE_AZ_1
or MULTI_AZ_1
.
Specifies how many high-availability (HA) file server pairs the file system will have. The default value is 1. The value of this property affects the values of StorageCapacity
, Iops
, and ThroughputCapacity
. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide.
Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:
The value of HAPairs
is less than 1 or greater than 12.
The value of HAPairs
is greater than 1 and the value of DeploymentType
is SINGLE_AZ_1
or MULTI_AZ_1
.
Specifies how many high-availability (HA) pairs of file servers will power your file system. First-generation file systems are powered by 1 HA pair. Second-generation multi-AZ file systems are powered by 1 HA pair. Second generation single-AZ file systems are powered by up to 12 HA pairs. The default value is 1. The value of this property affects the values of StorageCapacity
, Iops
, and ThroughputCapacity
. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide. Block storage protocol support (iSCSI and NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For more information, see Using block storage protocols.
Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:
The value of HAPairs
is less than 1 or greater than 12.
The value of HAPairs
is greater than 1 and the value of DeploymentType
is SINGLE_AZ_1
, MULTI_AZ_1
, or MULTI_AZ_2
.
Specifies how many high-availability (HA) file server pairs the file system will have. The default value is 1. The value of this property affects the values of StorageCapacity
, Iops
, and ThroughputCapacity
. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide.
Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:
The value of HAPairs
is less than 1 or greater than 12.
The value of HAPairs
is greater than 1 and the value of DeploymentType
is SINGLE_AZ_1
, MULTI_AZ_1
, or MULTI_AZ_2
.
Use to update the number of high-availability (HA) pairs for a second-generation single-AZ file system. If you increase the number of HA pairs for your file system, you must specify proportional increases for StorageCapacity
, Iops
, and ThroughputCapacity
. For more information, see High-availability (HA) pairs in the FSx for ONTAP user guide. Block storage protocol support (iSCSI and NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For more information, see Using block storage protocols.
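As a sketch of that update path, reusing the fsx client from the earlier sketch (the file system ID and sizing values are hypothetical and must satisfy the proportionality rules just described):

```go
// Grow a second-generation single-AZ file system from 1 to 2 HA pairs.
// HAPairs on UpdateFileSystemOntapConfiguration is new as of this release.
_, err := client.UpdateFileSystem(&fsx.UpdateFileSystemInput{
	FileSystemId:    aws.String("fs-0123456789abcdef0"), // placeholder
	StorageCapacity: aws.Int64(2048),                    // GiB, scaled with the pair count
	OntapConfiguration: &fsx.UpdateFileSystemOntapConfiguration{
		HAPairs:                     aws.Int64(2),
		ThroughputCapacityPerHAPair: aws.Int64(3072), // MBps for each HA pair
	},
})
if err != nil {
	log.Fatal(err)
}
```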
Specifies the FSx for ONTAP file system deployment type to use in creating the file system.
MULTI_AZ_1
- (Default) A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability.
SINGLE_AZ_1
- A file system configured for Single-AZ redundancy.
SINGLE_AZ_2
- A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy.
For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing a file system deployment type.
", - "OntapFileSystemConfiguration$DeploymentType": "Specifies the FSx for ONTAP file system deployment type in use in the file system.
MULTI_AZ_1
- (Default) A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability.
SINGLE_AZ_1
- A file system configured for Single-AZ redundancy.
SINGLE_AZ_2
- A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy.
For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing Multi-AZ or Single-AZ file system deployment.
" + "CreateFileSystemOntapConfiguration$DeploymentType": "Specifies the FSx for ONTAP file system deployment type to use in creating the file system.
MULTI_AZ_1
- A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. This is a first-generation FSx for ONTAP file system.
MULTI_AZ_2
- A high availability file system configured for Multi-AZ redundancy to tolerate temporary AZ unavailability. This is a second-generation FSx for ONTAP file system.
SINGLE_AZ_1
- A file system configured for Single-AZ redundancy. This is a first-generation FSx for ONTAP file system.
SINGLE_AZ_2
- A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy. This is a second-generation FSx for ONTAP file system.
For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing a file system deployment type.
", + "OntapFileSystemConfiguration$DeploymentType": "Specifies the FSx for ONTAP file system deployment type in use in the file system.
MULTI_AZ_1
- A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. This is a first-generation FSx for ONTAP file system.
MULTI_AZ_2
- A high availability file system configured for Multi-AZ redundancy to tolerate temporary AZ unavailability. This is a second-generation FSx for ONTAP file system.
SINGLE_AZ_1
- A file system configured for Single-AZ redundancy. This is a first-generation FSx for ONTAP file system.
SINGLE_AZ_2
- A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy. This is a second-generation FSx for ONTAP file system.
For information about the use cases for Multi-AZ and Single-AZ deployments, refer to Choosing Multi-AZ or Single-AZ file system deployment.
" } }, "OntapEndpointIpAddresses": { @@ -2017,8 +2018,8 @@ "OpenZFSDeploymentType": { "base": null, "refs": { - "CreateFileSystemOpenZFSConfiguration$DeploymentType": "Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following:
MULTI_AZ_1
- Creates file systems with high availability that are configured for Multi-AZ redundancy to tolerate temporary unavailability in Availability Zones (AZs). Multi_AZ_1
is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) Amazon Web Services Regions.
SINGLE_AZ_1
- Creates file systems with throughput capacities of 64 - 4,096 MB/s. Single_AZ_1
is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available.
SINGLE_AZ_2
- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. Single_AZ_2
is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) Amazon Web Services Regions.
For more information, see Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide.
", - "OpenZFSFileSystemConfiguration$DeploymentType": "Specifies the file-system deployment type. Amazon FSx for OpenZFS supports
MULTI_AZ_1
, SINGLE_AZ_1
, and SINGLE_AZ_2
.
Specifies the file system deployment type. Valid values are the following:
MULTI_AZ_1
- Creates file systems with high availability and durability by replicating your data and supporting failover across multiple Availability Zones in the same Amazon Web Services Region.
SINGLE_AZ_HA_2
- Creates file systems with high availability and throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache by deploying a primary and standby file system within the same Availability Zone.
SINGLE_AZ_HA_1
- Creates file systems with high availability and throughput capacities of 64 - 4,096 MB/s by deploying a primary and standby file system within the same Availability Zone.
SINGLE_AZ_2
- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache that automatically recover within a single Availability Zone.
SINGLE_AZ_1
- Creates file systems with throughput capacities of 64 - 4,096 MBs that automatically recover within a single Availability Zone.
For a list of which Amazon Web Services Regions each deployment type is available in, see Deployment type availability. For more information on the differences in performance between deployment types, see File system performance in the Amazon FSx for OpenZFS User Guide.
", + "OpenZFSFileSystemConfiguration$DeploymentType": "Specifies the file-system deployment type. Amazon FSx for OpenZFS supports
MULTI_AZ_1
, SINGLE_AZ_HA_2
, SINGLE_AZ_HA_1
, SINGLE_AZ_2
, and SINGLE_AZ_1
.
Displays the current percent of progress of an asynchronous task.
", "refs": { - "AdministrativeAction$ProgressPercent": "The percentage-complete status of a STORAGE_OPTIMIZATION
administrative action. Does not apply to any other administrative action type.
The percentage-complete status of a STORAGE_OPTIMIZATION
or DOWNLOAD_DATA_FROM_BACKUP
administrative action. Does not apply to any other administrative action type.
The status of the administrative action, as follows:
FAILED
- Amazon FSx failed to process the administrative action successfully.
IN_PROGRESS
- Amazon FSx is processing the administrative action.
PENDING
- Amazon FSx is waiting to process the administrative action.
COMPLETED
- Amazon FSx has finished processing the administrative task.
UPDATED_OPTIMIZING
- For a storage-capacity increase update, Amazon FSx has updated the file system with the new storage capacity, and is now performing the storage-optimization process.
The status of the administrative action, as follows:
FAILED
- Amazon FSx failed to process the administrative action successfully.
IN_PROGRESS
- Amazon FSx is processing the administrative action.
PENDING
- Amazon FSx is waiting to process the administrative action.
COMPLETED
- Amazon FSx has finished processing the administrative task.
For a backup restore to a second-generation FSx for ONTAP file system, indicates that all data has been downloaded to the volume, and clients now have read-write access to the volume.
UPDATED_OPTIMIZING
- For a storage-capacity increase update, Amazon FSx has updated the file system with the new storage capacity, and is now performing the storage-optimization process.
PENDING
- For a backup restore to a second-generation FSx for ONTAP file system, indicates that the file metadata is being downloaded onto the volume. The volume's Lifecycle state is CREATING.
IN_PROGRESS
- For a backup restore to a second-generation FSx for ONTAP file system, indicates that all metadata has been downloaded to the new volume and clients can access data with read-only access while Amazon FSx downloads the file data to the volume. Track the progress of this process with the ProgressPercent
element.
The ID for a subnet. A subnet is a range of IP addresses in your virtual private cloud (VPC). For more information, see VPC and subnets in the Amazon VPC User Guide.
", "refs": { - "CreateFileSystemOntapConfiguration$PreferredSubnetId": "Required when DeploymentType
is set to MULTI_AZ_1
. This specifies the subnet in which you want the preferred file server to be located.
Required when DeploymentType
is set to MULTI_AZ_1
or MULTI_AZ_2
. This specifies the subnet in which you want the preferred file server to be located.
Required when DeploymentType
is set to MULTI_AZ_1
. This specifies the subnet in which you want the preferred file server to be located.
Required when DeploymentType
is set to MULTI_AZ_1
. This specifies the subnet in which you want the preferred file server to be located. For in-Amazon Web Services applications, we recommend that you launch your clients in the same Availability Zone (AZ) as your preferred file server to reduce cross-AZ data transfer costs and minimize latency.
The subnet ID that is either invalid or not part of the VPC specified.
", @@ -2768,9 +2769,9 @@ "ThroughputCapacityPerHAPair": { "base": null, "refs": { - "CreateFileSystemOntapConfiguration$ThroughputCapacityPerHAPair": "Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.
You can define either the ThroughputCapacityPerHAPair
or the ThroughputCapacity
when creating a file system, but not both.
This field and ThroughputCapacity
are the same for scale-up file systems powered by one HA pair.
For SINGLE_AZ_1
and MULTI_AZ_1
file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.
For SINGLE_AZ_2
file systems, valid values are 3072 or 6144 MBps.
Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:
The value of ThroughputCapacity
and ThroughputCapacityPerHAPair
are not the same value for file systems with one HA pair.
The value of deployment type is SINGLE_AZ_2
and ThroughputCapacity
/ ThroughputCapacityPerHAPair
is a valid HA pair (a value between 2 and 12).
The value of ThroughputCapacityPerHAPair
is not a valid value.
Use to choose the throughput capacity per HA pair. When the value of HAPairs
is equal to 1, the value of ThroughputCapacityPerHAPair
is the total throughput for the file system.
This field and ThroughputCapacity
cannot be defined in the same API call, but one is required.
This field and ThroughputCapacity
are the same for file systems with one HA pair.
For SINGLE_AZ_1
and MULTI_AZ_1
, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.
For SINGLE_AZ_2
, valid values are 3072 or 6144 MBps.
Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:
The value of ThroughputCapacity
and ThroughputCapacityPerHAPair
are not the same value.
The value of deployment type is SINGLE_AZ_2
and ThroughputCapacity
/ ThroughputCapacityPerHAPair
is a valid HA pair (a value between 2 and 12).
The value of ThroughputCapacityPerHAPair
is not a valid value.
Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.
This field and ThroughputCapacity
cannot be defined in the same API call, but one is required.
This field and ThroughputCapacity
are the same for file systems with one HA pair.
For SINGLE_AZ_1
and MULTI_AZ_1
, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.
For SINGLE_AZ_2
, valid values are 3072 or 6144 MBps.
Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:
The value of ThroughputCapacity
and ThroughputCapacityPerHAPair
are not the same value for file systems with one HA pair.
The value of deployment type is SINGLE_AZ_2
and ThroughputCapacity
/ ThroughputCapacityPerHAPair
is a valid HA pair (a value between 2 and 12).
The value of ThroughputCapacityPerHAPair
is not a valid value.
Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.
You can define either the ThroughputCapacityPerHAPair
or the ThroughputCapacity
when creating a file system, but not both.
This field and ThroughputCapacity
are the same for file systems powered by one HA pair.
For SINGLE_AZ_1
and MULTI_AZ_1
file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.
For SINGLE_AZ_2
, valid values are 1536, 3072, or 6144 MBps.
For MULTI_AZ_2
, valid values are 384, 768, 1536, 3072, or 6144 MBps.
Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:
The value of ThroughputCapacity
and ThroughputCapacityPerHAPair
are not the same value for file systems with one HA pair.
The value of deployment type is SINGLE_AZ_2
and ThroughputCapacity
/ ThroughputCapacityPerHAPair
is not a valid HA pair (a value between 1 and 12).
The value of ThroughputCapacityPerHAPair
is not a valid value.
Use to choose the throughput capacity per HA pair. When the value of HAPairs
is equal to 1, the value of ThroughputCapacityPerHAPair
is the total throughput for the file system.
This field and ThroughputCapacity
cannot be defined in the same API call, but one is required.
This field and ThroughputCapacity
are the same for file systems with one HA pair.
For SINGLE_AZ_1
and MULTI_AZ_1
file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.
For SINGLE_AZ_2
, valid values are 1536, 3072, or 6144 MBps.
For MULTI_AZ_2
, valid values are 384, 768, 1536, 3072, or 6144 MBps.
Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:
The value of ThroughputCapacity
and ThroughputCapacityPerHAPair
are not the same value.
The value of deployment type is SINGLE_AZ_2
and ThroughputCapacity
/ ThroughputCapacityPerHAPair
is not a valid HA pair (a value between 1 and 12).
The value of ThroughputCapacityPerHAPair
is not a valid value.
Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system.
This field and ThroughputCapacity
cannot be defined in the same API call, but one is required.
This field and ThroughputCapacity
are the same for file systems with one HA pair.
For SINGLE_AZ_1
and MULTI_AZ_1
file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.
For SINGLE_AZ_2
, valid values are 1536, 3072, or 6144 MBps.
For MULTI_AZ_2
, valid values are 384, 768, 1536, 3072, or 6144 MBps.
Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:
The value of ThroughputCapacity
and ThroughputCapacityPerHAPair
are not the same value for file systems with one HA pair.
The value of deployment type is SINGLE_AZ_2
and ThroughputCapacity
/ ThroughputCapacityPerHAPair
is not a valid HA pair (a value between 1 and 12).
The value of ThroughputCapacityPerHAPair
is not a valid value.
Allows you to either upgrade your Amazon OpenSearch Service domain or perform an upgrade eligibility check to a compatible version of OpenSearch or Elasticsearch.
" }, "shapes": { + "AIMLOptionsInput": { + "base": "Container for parameters required to enable all machine learning features.
", + "refs": { + "CreateDomainRequest$AIMLOptions": "Options for all machine learning features for the specified domain.
", + "UpdateDomainConfigRequest$AIMLOptions": "Options for all machine learning features for the specified domain.
" + } + }, + "AIMLOptionsOutput": { + "base": "Container for parameters representing the state of machine learning features on the specified domain.
", + "refs": { + "AIMLOptionsStatus$Options": "Machine learning options on the specified domain.
", + "DomainStatus$AIMLOptions": "Container for parameters required to enable all machine learning features.
" + } + }, + "AIMLOptionsStatus": { + "base": "The status of machine learning options on the specified domain.
", + "refs": { + "DomainConfig$AIMLOptions": "Container for parameters required to enable all machine learning features.
" + } + }, "ARN": { "base": "The Amazon Resource Name (ARN) of the domain. See Identifiers for IAM Entities in Using Amazon Web Services Identity and Access Management for more information.
", "refs": { @@ -1894,6 +1914,31 @@ "DomainStatus$ModifyingProperties": "Information about the domain properties that are currently being modified.
" } }, + "NaturalLanguageQueryGenerationCurrentState": { + "base": null, + "refs": { + "NaturalLanguageQueryGenerationOptionsOutput$CurrentState": "The current state of the natural language query generation feature, indicating completion, in progress, or failure.
" + } + }, + "NaturalLanguageQueryGenerationDesiredState": { + "base": null, + "refs": { + "NaturalLanguageQueryGenerationOptionsInput$DesiredState": "The desired state of the natural language query generation feature. Valid values are ENABLED and DISABLED.
", + "NaturalLanguageQueryGenerationOptionsOutput$DesiredState": "The desired state of the natural language query generation feature. Valid values are ENABLED and DISABLED.
" + } + }, + "NaturalLanguageQueryGenerationOptionsInput": { + "base": "Container for parameters required to enable the natural language query generation feature.
", + "refs": { + "AIMLOptionsInput$NaturalLanguageQueryGenerationOptions": "Container for parameters required for natural language query generation on the specified domain.
" + } + }, + "NaturalLanguageQueryGenerationOptionsOutput": { + "base": "Container for parameters representing the state of the natural language query generation feature on the specified domain.
", + "refs": { + "AIMLOptionsOutput$NaturalLanguageQueryGenerationOptions": "Container for parameters required for natural language query generation on the specified domain.
" + } + }, "NextToken": { "base": "When nextToken
is returned, there are more results available. The value of nextToken
is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.
Provides the current status of an entity.
", "refs": { + "AIMLOptionsStatus$Status": null, "AccessPoliciesStatus$Status": "The status of the access policy for the domain.
", "AdvancedOptionsStatus$Status": "The status of advanced options for the specified domain.
", "AdvancedSecurityOptionsStatus$Status": "Status of the fine-grained access control settings for a domain.
", diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index ed84785734e..113d8706f01 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -608,6 +608,19 @@ {"shape":"ResourceLimitExceeded"} ] }, + "CreateOptimizationJob":{ + "name":"CreateOptimizationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateOptimizationJobRequest"}, + "output":{"shape":"CreateOptimizationJobResponse"}, + "errors":[ + {"shape":"ResourceInUse"}, + {"shape":"ResourceLimitExceeded"} + ] + }, "CreatePipeline":{ "name":"CreatePipeline", "http":{ @@ -1240,6 +1253,17 @@ }, "input":{"shape":"DeleteNotebookInstanceLifecycleConfigInput"} }, + "DeleteOptimizationJob":{ + "name":"DeleteOptimizationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOptimizationJobRequest"}, + "errors":[ + {"shape":"ResourceNotFound"} + ] + }, "DeletePipeline":{ "name":"DeletePipeline", "http":{ @@ -1896,6 +1920,18 @@ "input":{"shape":"DescribeNotebookInstanceLifecycleConfigInput"}, "output":{"shape":"DescribeNotebookInstanceLifecycleConfigOutput"} }, + "DescribeOptimizationJob":{ + "name":"DescribeOptimizationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOptimizationJobRequest"}, + "output":{"shape":"DescribeOptimizationJobResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ] + }, "DescribePipeline":{ "name":"DescribePipeline", "http":{ @@ -2718,6 +2754,15 @@ "input":{"shape":"ListNotebookInstancesInput"}, "output":{"shape":"ListNotebookInstancesOutput"} }, + "ListOptimizationJobs":{ + "name":"ListOptimizationJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListOptimizationJobsRequest"}, + "output":{"shape":"ListOptimizationJobsResponse"} + }, "ListPipelineExecutionSteps":{ "name":"ListPipelineExecutionSteps", "http":{ @@ -3203,6 +3248,17 @@ }, "input":{"shape":"StopNotebookInstanceInput"} }, + "StopOptimizationJob":{ + "name":"StopOptimizationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopOptimizationJobRequest"}, + "errors":[ + {"shape":"ResourceNotFound"} + ] + }, "StopPipelineExecution":{ "name":"StopPipelineExecution", "http":{ @@ -3845,6 +3901,29 @@ "max":15, "min":1 }, + "AdditionalModelChannelName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[A-Za-z0-9\\.\\-_]+" + }, + "AdditionalModelDataSource":{ + "type":"structure", + "required":[ + "ChannelName", + "S3DataSource" + ], + "members":{ + "ChannelName":{"shape":"AdditionalModelChannelName"}, + "S3DataSource":{"shape":"S3ModelDataSource"} + } + }, + "AdditionalModelDataSources":{ + "type":"list", + "member":{"shape":"AdditionalModelDataSource"}, + "max":5, + "min":0 + }, "AdditionalS3DataSource":{ "type":"structure", "required":[ @@ -4028,6 +4107,13 @@ "ValidationProfiles":{"shape":"AlgorithmValidationProfiles"} } }, + "AmazonQSettings":{ + "type":"structure", + "members":{ + "Status":{"shape":"FeatureStatus"}, + "QProfileArn":{"shape":"QProfileArn"} + } + }, "AnnotationConsolidationConfig":{ "type":"structure", "required":["AnnotationConsolidationLambdaArn"], @@ -6350,6 +6436,7 @@ "Mode":{"shape":"ContainerMode"}, "ModelDataUrl":{"shape":"Url"}, "ModelDataSource":{"shape":"ModelDataSource"}, + "AdditionalModelDataSources":{"shape":"AdditionalModelDataSources"}, "Environment":{"shape":"EnvironmentMap"}, "ModelPackageName":{"shape":"VersionedArnOrName"}, 
"InferenceSpecificationName":{"shape":"InferenceSpecificationName"}, @@ -7553,6 +7640,37 @@ "NotebookInstanceArn":{"shape":"NotebookInstanceArn"} } }, + "CreateOptimizationJobRequest":{ + "type":"structure", + "required":[ + "OptimizationJobName", + "RoleArn", + "ModelSource", + "DeploymentInstanceType", + "OptimizationConfigs", + "OutputConfig", + "StoppingCondition" + ], + "members":{ + "OptimizationJobName":{"shape":"EntityName"}, + "RoleArn":{"shape":"RoleArn"}, + "ModelSource":{"shape":"OptimizationJobModelSource"}, + "DeploymentInstanceType":{"shape":"OptimizationJobDeploymentInstanceType"}, + "OptimizationEnvironment":{"shape":"OptimizationJobEnvironmentVariables"}, + "OptimizationConfigs":{"shape":"OptimizationConfigs"}, + "OutputConfig":{"shape":"OptimizationJobOutputConfig"}, + "StoppingCondition":{"shape":"StoppingCondition"}, + "Tags":{"shape":"TagList"}, + "VpcConfig":{"shape":"OptimizationVpcConfig"} + } + }, + "CreateOptimizationJobResponse":{ + "type":"structure", + "required":["OptimizationJobArn"], + "members":{ + "OptimizationJobArn":{"shape":"OptimizationJobArn"} + } + }, "CreatePipelineRequest":{ "type":"structure", "required":[ @@ -8638,6 +8756,13 @@ "NotebookInstanceLifecycleConfigName":{"shape":"NotebookInstanceLifecycleConfigName"} } }, + "DeleteOptimizationJobRequest":{ + "type":"structure", + "required":["OptimizationJobName"], + "members":{ + "OptimizationJobName":{"shape":"EntityName"} + } + }, "DeletePipelineRequest":{ "type":"structure", "required":[ @@ -10318,6 +10443,48 @@ "InstanceMetadataServiceConfiguration":{"shape":"InstanceMetadataServiceConfiguration"} } }, + "DescribeOptimizationJobRequest":{ + "type":"structure", + "required":["OptimizationJobName"], + "members":{ + "OptimizationJobName":{"shape":"EntityName"} + } + }, + "DescribeOptimizationJobResponse":{ + "type":"structure", + "required":[ + "OptimizationJobArn", + "OptimizationJobStatus", + "CreationTime", + "LastModifiedTime", + "OptimizationJobName", + "ModelSource", + "DeploymentInstanceType", + "OptimizationConfigs", + "OutputConfig", + "RoleArn", + "StoppingCondition" + ], + "members":{ + "OptimizationJobArn":{"shape":"OptimizationJobArn"}, + "OptimizationJobStatus":{"shape":"OptimizationJobStatus"}, + "OptimizationStartTime":{"shape":"Timestamp"}, + "OptimizationEndTime":{"shape":"Timestamp"}, + "CreationTime":{"shape":"CreationTime"}, + "LastModifiedTime":{"shape":"LastModifiedTime"}, + "FailureReason":{"shape":"FailureReason"}, + "OptimizationJobName":{"shape":"EntityName"}, + "ModelSource":{"shape":"OptimizationJobModelSource"}, + "OptimizationEnvironment":{"shape":"OptimizationJobEnvironmentVariables"}, + "DeploymentInstanceType":{"shape":"OptimizationJobDeploymentInstanceType"}, + "OptimizationConfigs":{"shape":"OptimizationConfigs"}, + "OutputConfig":{"shape":"OptimizationJobOutputConfig"}, + "OptimizationOutput":{"shape":"OptimizationOutput"}, + "RoleArn":{"shape":"RoleArn"}, + "StoppingCondition":{"shape":"StoppingCondition"}, + "VpcConfig":{"shape":"OptimizationVpcConfig"} + } + }, "DescribePipelineDefinitionForExecutionRequest":{ "type":"structure", "required":["PipelineExecutionArn"], @@ -11047,7 +11214,8 @@ "SecurityGroupIds":{"shape":"DomainSecurityGroupIds"}, "RStudioServerProDomainSettings":{"shape":"RStudioServerProDomainSettings"}, "ExecutionRoleIdentityConfig":{"shape":"ExecutionRoleIdentityConfig"}, - "DockerSettings":{"shape":"DockerSettings"} + "DockerSettings":{"shape":"DockerSettings"}, + "AmazonQSettings":{"shape":"AmazonQSettings"} } }, "DomainSettingsForUpdate":{ 
@@ -11056,7 +11224,8 @@ "RStudioServerProDomainSettingsForUpdate":{"shape":"RStudioServerProDomainSettingsForUpdate"}, "ExecutionRoleIdentityConfig":{"shape":"ExecutionRoleIdentityConfig"}, "SecurityGroupIds":{"shape":"DomainSecurityGroupIds"}, - "DockerSettings":{"shape":"DockerSettings"} + "DockerSettings":{"shape":"DockerSettings"}, + "AmazonQSettings":{"shape":"AmazonQSettings"} } }, "DomainStatus":{ @@ -15724,6 +15893,41 @@ "NotebookInstances":{"shape":"NotebookInstanceSummaryList"} } }, + "ListOptimizationJobsRequest":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "MaxResults":{ + "shape":"MaxResults", + "box":true + }, + "CreationTimeAfter":{"shape":"CreationTime"}, + "CreationTimeBefore":{"shape":"CreationTime"}, + "LastModifiedTimeAfter":{"shape":"LastModifiedTime"}, + "LastModifiedTimeBefore":{"shape":"LastModifiedTime"}, + "OptimizationContains":{"shape":"NameContains"}, + "NameContains":{"shape":"NameContains"}, + "StatusEquals":{"shape":"OptimizationJobStatus"}, + "SortBy":{"shape":"ListOptimizationJobsSortBy"}, + "SortOrder":{"shape":"SortOrder"} + } + }, + "ListOptimizationJobsResponse":{ + "type":"structure", + "required":["OptimizationJobSummaries"], + "members":{ + "OptimizationJobSummaries":{"shape":"OptimizationJobSummaries"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListOptimizationJobsSortBy":{ + "type":"string", + "enum":[ + "Name", + "CreationTime", + "Status" + ] + }, "ListPipelineExecutionStepsRequest":{ "type":"structure", "members":{ @@ -16691,6 +16895,13 @@ "InvocationsMaxRetries":{"shape":"InvocationsMaxRetries"} } }, + "ModelCompilationConfig":{ + "type":"structure", + "members":{ + "Image":{"shape":"OptimizationContainerImage"}, + "OverrideEnvironment":{"shape":"OptimizationJobEnvironmentVariables"} + } + }, "ModelCompressionType":{ "type":"string", "enum":[ @@ -17247,6 +17458,13 @@ "GroundTruthS3Input":{"shape":"MonitoringGroundTruthS3Input"} } }, + "ModelQuantizationConfig":{ + "type":"structure", + "members":{ + "Image":{"shape":"OptimizationContainerImage"}, + "OverrideEnvironment":{"shape":"OptimizationJobEnvironmentVariables"} + } + }, "ModelRegisterSettings":{ "type":"structure", "members":{ @@ -18196,6 +18414,176 @@ "In" ] }, + "OptimizationConfig":{ + "type":"structure", + "members":{ + "ModelQuantizationConfig":{"shape":"ModelQuantizationConfig"}, + "ModelCompilationConfig":{"shape":"ModelCompilationConfig"} + }, + "union":true + }, + "OptimizationConfigs":{ + "type":"list", + "member":{"shape":"OptimizationConfig"}, + "max":10 + }, + "OptimizationContainerImage":{ + "type":"string", + "max":255, + "pattern":"[\\S]+" + }, + "OptimizationJobArn":{ + "type":"string", + "max":256, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:optimization-job/.*" + }, + "OptimizationJobDeploymentInstanceType":{ + "type":"string", + "enum":[ + "ml.p4d.24xlarge", + "ml.p4de.24xlarge", + "ml.p5.48xlarge", + "ml.g5.xlarge", + "ml.g5.2xlarge", + "ml.g5.4xlarge", + "ml.g5.8xlarge", + "ml.g5.12xlarge", + "ml.g5.16xlarge", + "ml.g5.24xlarge", + "ml.g5.48xlarge", + "ml.g6.xlarge", + "ml.g6.2xlarge", + "ml.g6.4xlarge", + "ml.g6.8xlarge", + "ml.g6.12xlarge", + "ml.g6.16xlarge", + "ml.g6.24xlarge", + "ml.g6.48xlarge", + "ml.inf2.xlarge", + "ml.inf2.8xlarge", + "ml.inf2.24xlarge", + "ml.inf2.48xlarge", + "ml.trn1.2xlarge", + "ml.trn1.32xlarge", + "ml.trn1n.32xlarge" + ] + }, + "OptimizationJobEnvironmentVariables":{ + "type":"map", + "key":{"shape":"NonEmptyString256"}, + "value":{"shape":"String256"}, + "max":25 + }, + 
"OptimizationJobModelSource":{ + "type":"structure", + "members":{ + "S3":{"shape":"OptimizationJobModelSourceS3"} + } + }, + "OptimizationJobModelSourceS3":{ + "type":"structure", + "members":{ + "S3Uri":{"shape":"S3Uri"}, + "ModelAccessConfig":{"shape":"OptimizationModelAccessConfig"} + } + }, + "OptimizationJobOutputConfig":{ + "type":"structure", + "required":["S3OutputLocation"], + "members":{ + "KmsKeyId":{"shape":"KmsKeyId"}, + "S3OutputLocation":{"shape":"S3Uri"} + } + }, + "OptimizationJobStatus":{ + "type":"string", + "enum":[ + "INPROGRESS", + "COMPLETED", + "FAILED", + "STARTING", + "STOPPING", + "STOPPED" + ] + }, + "OptimizationJobSummaries":{ + "type":"list", + "member":{"shape":"OptimizationJobSummary"} + }, + "OptimizationJobSummary":{ + "type":"structure", + "required":[ + "OptimizationJobName", + "OptimizationJobArn", + "CreationTime", + "OptimizationJobStatus", + "DeploymentInstanceType", + "OptimizationTypes" + ], + "members":{ + "OptimizationJobName":{"shape":"EntityName"}, + "OptimizationJobArn":{"shape":"OptimizationJobArn"}, + "CreationTime":{"shape":"CreationTime"}, + "OptimizationJobStatus":{"shape":"OptimizationJobStatus"}, + "OptimizationStartTime":{"shape":"Timestamp"}, + "OptimizationEndTime":{"shape":"Timestamp"}, + "LastModifiedTime":{"shape":"LastModifiedTime"}, + "DeploymentInstanceType":{"shape":"OptimizationJobDeploymentInstanceType"}, + "OptimizationTypes":{"shape":"OptimizationTypes"} + } + }, + "OptimizationModelAcceptEula":{"type":"boolean"}, + "OptimizationModelAccessConfig":{ + "type":"structure", + "required":["AcceptEula"], + "members":{ + "AcceptEula":{"shape":"OptimizationModelAcceptEula"} + } + }, + "OptimizationOutput":{ + "type":"structure", + "members":{ + "RecommendedInferenceImage":{"shape":"OptimizationContainerImage"} + } + }, + "OptimizationType":{"type":"string"}, + "OptimizationTypes":{ + "type":"list", + "member":{"shape":"OptimizationType"} + }, + "OptimizationVpcConfig":{ + "type":"structure", + "required":[ + "SecurityGroupIds", + "Subnets" + ], + "members":{ + "SecurityGroupIds":{"shape":"OptimizationVpcSecurityGroupIds"}, + "Subnets":{"shape":"OptimizationVpcSubnets"} + } + }, + "OptimizationVpcSecurityGroupId":{ + "type":"string", + "max":32, + "pattern":"[-0-9a-zA-Z]+" + }, + "OptimizationVpcSecurityGroupIds":{ + "type":"list", + "member":{"shape":"OptimizationVpcSecurityGroupId"}, + "max":5, + "min":1 + }, + "OptimizationVpcSubnetId":{ + "type":"string", + "max":32, + "pattern":"[-0-9a-zA-Z]+" + }, + "OptimizationVpcSubnets":{ + "type":"list", + "member":{"shape":"OptimizationVpcSubnetId"}, + "max":16, + "min":1 + }, "OptionalDouble":{"type":"double"}, "OptionalInteger":{"type":"integer"}, "OptionalVolumeSizeInGB":{ @@ -19529,6 +19917,10 @@ "ModelPackageGroupArn":{"shape":"ModelPackageGroupArn"} } }, + "QProfileArn":{ + "type":"string", + "pattern":"^arn:[-.a-z0-9]{1,63}:codewhisperer:([-.a-z0-9]{0,63}:){2}([a-zA-Z0-9-_:/]){1,1023}$" + }, "QualityCheckStepMetadata":{ "type":"structure", "members":{ @@ -21339,6 +21731,13 @@ "NotebookInstanceName":{"shape":"NotebookInstanceName"} } }, + "StopOptimizationJobRequest":{ + "type":"structure", + "required":["OptimizationJobName"], + "members":{ + "OptimizationJobName":{"shape":"EntityName"} + } + }, "StopPipelineExecutionRequest":{ "type":"structure", "required":[ diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index d448f805112..a5cc59e24da 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ 
b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -50,6 +50,7 @@ "CreateMonitoringSchedule": "Creates a schedule that regularly starts Amazon SageMaker Processing Jobs to monitor the data captured for an Amazon SageMaker Endpoint.
", "CreateNotebookInstance": "Creates an SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance running on a Jupyter notebook.
In a CreateNotebookInstance
request, specify the type of ML compute instance that you want to run. SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance.
SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use SageMaker with a specific algorithm or with a machine learning framework.
After receiving the request, SageMaker does the following:
Creates a network interface in the SageMaker VPC.
(Option) If you specified SubnetId
, SageMaker creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC.
Launches an EC2 instance of the type specified in the request in the SageMaker VPC. If you specified SubnetId
of your VPC, SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it.
After creating the notebook instance, SageMaker returns its Amazon Resource Name (ARN). You can't change the name of a notebook instance after you create it.
After SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating SageMaker endpoints, and validate hosted models.
For more information, see How It Works.
", "CreateNotebookInstanceLifecycleConfig": "Creates a lifecycle configuration that you can associate with a notebook instance. A lifecycle configuration is a collection of shell scripts that run when you create or start a notebook instance.
Each lifecycle configuration script has a limit of 16384 characters.
The value of the $PATH
environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin
.
View Amazon CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances
in log stream [notebook-instance-name]/[LifecycleConfigHook]
.
Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.
For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.
", + "CreateOptimizationJob": "Creates a job that optimizes a model for inference performance. To create the job, you provide the location of a source model, and you provide the settings for the optimization techniques that you want the job to apply. When the job completes successfully, SageMaker uploads the new optimized model to the output destination that you specify.
For more information about how to use this action, and about the supported optimization techniques, see Optimize model inference with Amazon SageMaker.
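The new job lifecycle (CreateOptimizationJob, DescribeOptimizationJob, ListOptimizationJobs, StopOptimizationJob, DeleteOptimizationJob) can be exercised end to end from the v1 SDK. A hedged Go sketch — every name, ARN, S3 URI, and environment key below is a placeholder, and a real job typically needs technique-specific settings such as a container Image:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	client := sagemaker.New(sess)

	jobName := aws.String("my-optimization-job")
	_, err := client.CreateOptimizationJob(&sagemaker.CreateOptimizationJobInput{
		OptimizationJobName:    jobName,
		RoleArn:                aws.String("arn:aws:iam::111122223333:role/MySageMakerRole"),
		DeploymentInstanceType: aws.String("ml.g5.2xlarge"),
		ModelSource: &sagemaker.OptimizationJobModelSource{
			S3: &sagemaker.OptimizationJobModelSourceS3{
				S3Uri: aws.String("s3://amzn-s3-demo-bucket/source-model/"),
			},
		},
		// OptimizationConfig is a union: set exactly one technique per entry,
		// e.g. quantization here, or ModelCompilationConfig for AOT compilation.
		OptimizationConfigs: []*sagemaker.OptimizationConfig{
			{ModelQuantizationConfig: &sagemaker.ModelQuantizationConfig{
				OverrideEnvironment: map[string]*string{
					"OPTION_QUANTIZE": aws.String("awq"), // illustrative key/value
				},
			}},
		},
		OutputConfig: &sagemaker.OptimizationJobOutputConfig{
			S3OutputLocation: aws.String("s3://amzn-s3-demo-bucket/optimized/"),
		},
		StoppingCondition: &sagemaker.StoppingCondition{
			MaxRuntimeInSeconds: aws.Int64(3600),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Poll until the job leaves its transient states (status enum in this model:
	// INPROGRESS, COMPLETED, FAILED, STARTING, STOPPING, STOPPED).
	for {
		out, err := client.DescribeOptimizationJob(&sagemaker.DescribeOptimizationJobInput{
			OptimizationJobName: jobName,
		})
		if err != nil {
			log.Fatal(err)
		}
		status := aws.StringValue(out.OptimizationJobStatus)
		fmt.Println("status:", status)
		if status == "COMPLETED" || status == "FAILED" || status == "STOPPED" {
			break
		}
		time.Sleep(30 * time.Second)
	}
}
```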
", "CreatePipeline": "Creates a pipeline using a JSON pipeline definition.
", "CreatePresignedDomainUrl": "Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM.
The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app.
You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint.
The URL that you get from a call to CreatePresignedDomainUrl
has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds
. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page.
Returns a presigned URL that you can use to connect to the MLflow UI attached to your tracking server. For more information, see Launch the MLflow UI using a presigned URL.
", @@ -106,6 +107,7 @@ "DeleteMonitoringSchedule": "Deletes a monitoring schedule. Also stops the schedule had not already been stopped. This does not delete the job execution history of the monitoring schedule.
", "DeleteNotebookInstance": " Deletes an SageMaker notebook instance. Before you can delete a notebook instance, you must call the StopNotebookInstance
API.
When you delete a notebook instance, you lose all of your data. SageMaker removes the ML compute instance, and deletes the ML storage volume and the network interface associated with the notebook instance.
Deletes a notebook instance lifecycle configuration.
", + "DeleteOptimizationJob": "Deletes an optimization job.
", "DeletePipeline": "Deletes a pipeline if there are no running instances of the pipeline. To delete a pipeline, you must stop all running instances of the pipeline using the StopPipelineExecution
API. When you delete a pipeline, all instances of the pipeline are deleted.
Delete the specified project.
", "DeleteSpace": "Used to delete a space.
", @@ -164,6 +166,7 @@ "DescribeMonitoringSchedule": "Describes the schedule for a monitoring job.
", "DescribeNotebookInstance": "Returns information about a notebook instance.
", "DescribeNotebookInstanceLifecycleConfig": "Returns a description of a notebook instance lifecycle configuration.
For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.
", + "DescribeOptimizationJob": "Provides the properties of the specified optimization job.
", "DescribePipeline": "Describes the details of a pipeline.
", "DescribePipelineDefinitionForExecution": "Describes the details of an execution's pipeline definition.
", "DescribePipelineExecution": "Describes the details of a pipeline execution.
", @@ -245,6 +248,7 @@ "ListMonitoringSchedules": "Returns list of all monitoring schedules.
", "ListNotebookInstanceLifecycleConfigs": "Lists notebook instance lifestyle configurations created with the CreateNotebookInstanceLifecycleConfig API.
", "ListNotebookInstances": "Returns a list of the SageMaker notebook instances in the requester's account in an Amazon Web Services Region.
", + "ListOptimizationJobs": "Lists the optimization jobs in your account and their properties.
", "ListPipelineExecutionSteps": "Gets a list of PipeLineExecutionStep
objects.
Gets a list of the pipeline executions.
", "ListPipelineParametersForExecution": "Gets a list of parameters for a pipeline execution.
", @@ -290,6 +294,7 @@ "StopMlflowTrackingServer": "Programmatically stop an MLflow Tracking Server.
", "StopMonitoringSchedule": "Stops a previously started monitoring schedule.
", "StopNotebookInstance": "Terminates the ML compute instance. Before terminating the instance, SageMaker disconnects the ML storage volume from it. SageMaker preserves the ML storage volume. SageMaker stops charging you for the ML compute instance when you call StopNotebookInstance
.
To access data on the ML storage volume for a notebook instance that has been terminated, call the StartNotebookInstance
API. StartNotebookInstance
launches another ML compute instance, configures it, and attaches the preserved ML storage volume so you can continue your work.
Ends a running inference optimization job.
", "StopPipelineExecution": "Stops a pipeline execution.
Callback Step
A pipeline execution won't stop while a callback step is running. When you call StopPipelineExecution
on a pipeline execution with a running callback step, SageMaker Pipelines sends an additional Amazon SQS message to the specified SQS queue. The body of the SQS message contains a \"Status\" field which is set to \"Stopping\".
You should add logic to your Amazon SQS message consumer to take any needed action (for example, resource cleanup) upon receipt of the message, followed by a call to SendPipelineExecutionStepSuccess
or SendPipelineExecutionStepFailure
.
Only when SageMaker Pipelines receives one of these calls will it stop the pipeline execution.
Lambda Step
A pipeline execution can't be stopped while a lambda step is running because the Lambda function invoked by the lambda step can't be stopped. If you attempt to stop the execution while the Lambda function is running, the pipeline waits for the Lambda function to finish or until the timeout is hit, whichever occurs first, and then stops. If the Lambda function finishes, the pipeline execution status is Stopped
. If the timeout is hit, the pipeline execution status is Failed
.
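As a hedged sketch of the consumer logic described above: the exact schema of the SQS message body is not reproduced in this document, so the JSON field names below are assumptions; only the SendPipelineExecutionStep* call is the documented part. This assumes encoding/json is imported alongside the packages from the first sketch.

// Assumed message shape; verify against the actual SQS body.
type callbackMessage struct {
	Token  string `json:"token"`
	Status string `json:"status"`
}

func handleCallback(svc *sagemaker.SageMaker, body string) error {
	var msg callbackMessage
	if err := json.Unmarshal([]byte(body), &msg); err != nil {
		return err
	}
	// Perform any cleanup here, then report back so SageMaker Pipelines
	// can finish stopping the execution.
	_, err := svc.SendPipelineExecutionStepSuccess(&sagemaker.SendPipelineExecutionStepSuccessInput{
		CallbackToken: aws.String(msg.Token),
	})
	return err
}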
Stops a processing job.
", "StopTrainingJob": "Stops a training job. To stop a job, SageMaker sends the algorithm the SIGTERM
signal, which delays job termination for 120 seconds. Algorithms might use this 120-second window to save the model artifacts, so the results of the training are not lost.
When it receives a StopTrainingJob
request, SageMaker changes the status of the job to Stopping
. After SageMaker stops the job, it sets the status to Stopped
.
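A one-call sketch, reusing svc; the job name is a placeholder. Because the job passes through Stopping before reaching Stopped as described above, a caller that needs the final state would poll DescribeTrainingJob afterwards.

_, err := svc.StopTrainingJob(&sagemaker.StopTrainingJobInput{
	TrainingJobName: aws.String("example-training-job"), // placeholder
})
if err != nil {
	panic(err)
}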
An array of additional Inference Specification objects to be added to the existing array of additional Inference Specifications. The total number of additional Inference Specifications cannot exceed 15. Each additional Inference Specification specifies artifacts based on this model package that can be used on inference endpoints. Generally used with SageMaker Neo to store the compiled artifacts.
" } }, + "AdditionalModelChannelName": { + "base": null, + "refs": { + "AdditionalModelDataSource$ChannelName": "A custom name for this AdditionalModelDataSource
object.
Data sources that are available to your model in addition to the one that you specify for ModelDataSource
when you use the CreateModel
action.
Data sources that are available to your model in addition to the one that you specify for ModelDataSource
when you use the CreateModel
action.
A data source used for training or inference that is in addition to the input dataset or model data.
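A hedged sketch of the new AdditionalModelDataSources field on CreateModel, reusing svc; every name, URI, and image below is a placeholder.

_, err := svc.CreateModel(&sagemaker.CreateModelInput{
	ModelName:        aws.String("example-model"),
	ExecutionRoleArn: aws.String("arn:aws:iam::111122223333:role/ExampleSageMakerRole"),
	PrimaryContainer: &sagemaker.ContainerDefinition{
		Image: aws.String("111122223333.dkr.ecr.us-east-1.amazonaws.com/example-inference:latest"),
		ModelDataSource: &sagemaker.ModelDataSource{
			S3DataSource: &sagemaker.S3ModelDataSource{
				S3Uri:           aws.String("s3://amzn-s3-demo-bucket/base-model/"),
				S3DataType:      aws.String("S3Prefix"),
				CompressionType: aws.String("None"),
			},
		},
		// New in this release: extra data sources exposed to the container.
		AdditionalModelDataSources: []*sagemaker.AdditionalModelDataSource{{
			ChannelName: aws.String("adapter"), // custom channel name
			S3DataSource: &sagemaker.S3ModelDataSource{
				S3Uri:           aws.String("s3://amzn-s3-demo-bucket/adapter-weights/"),
				S3DataType:      aws.String("S3Prefix"),
				CompressionType: aws.String("None"),
			},
		}},
	},
})
if err != nil {
	panic(err)
}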
", "refs": { @@ -579,6 +602,13 @@ "DescribeAlgorithmOutput$ValidationSpecification": "Details about configurations for one or more training jobs that SageMaker runs to test the algorithm.
" } }, + "AmazonQSettings": { + "base": "A collection of settings that configure the Amazon Q experience within the domain.
", + "refs": { + "DomainSettings$AmazonQSettings": "A collection of settings that configure the Amazon Q experience within the domain. The AuthMode
that you use to create the domain must be SSO
.
A collection of settings that configure the Amazon Q experience within the domain.
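A hedged sketch of enabling Amazon Q at domain creation, per the SSO requirement noted above; all IDs and ARNs are placeholders, and the QProfileArn format is an assumption.

_, err := svc.CreateDomain(&sagemaker.CreateDomainInput{
	DomainName: aws.String("example-domain"),
	AuthMode:   aws.String("SSO"), // required when enabling Amazon Q, per the text above
	DefaultUserSettings: &sagemaker.UserSettings{
		ExecutionRole: aws.String("arn:aws:iam::111122223333:role/ExampleSageMakerRole"),
	},
	SubnetIds: []*string{aws.String("subnet-0123456789abcdef0")},
	VpcId:     aws.String("vpc-0123456789abcdef0"),
	DomainSettings: &sagemaker.DomainSettings{
		AmazonQSettings: &sagemaker.AmazonQSettings{
			Status:      aws.String("ENABLED"),
			QProfileArn: aws.String("arn:aws:sso:::instance/EXAMPLE"), // placeholder
		},
	},
})
if err != nil {
	panic(err)
}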
" + } + }, "AnnotationConsolidationConfig": { "base": "Configures how labels are consolidated across human workers and processes output data.
", "refs": { @@ -3097,6 +3127,16 @@ "refs": { } }, + "CreateOptimizationJobRequest": { + "base": null, + "refs": { + } + }, + "CreateOptimizationJobResponse": { + "base": null, + "refs": { + } + }, "CreatePipelineRequest": { "base": null, "refs": { @@ -3266,6 +3306,7 @@ "DescribeModelPackageOutput$CreationTime": "A timestamp specifying when the model package was created.
", "DescribeNotebookInstanceLifecycleConfigOutput$CreationTime": "A timestamp that tells when the lifecycle configuration was created.
", "DescribeNotebookInstanceOutput$CreationTime": "A timestamp. Use this parameter to return the time when the notebook instance was created
", + "DescribeOptimizationJobResponse$CreationTime": "The time when you created the optimization job.
", "DescribeSpaceResponse$CreationTime": "The creation time.
", "DescribeUserProfileResponse$CreationTime": "The creation time.
", "DomainDetails$CreationTime": "The creation time.
", @@ -3290,12 +3331,15 @@ "ListNotebookInstanceLifecycleConfigsInput$CreationTimeAfter": "A filter that returns only lifecycle configurations that were created after the specified time (timestamp).
", "ListNotebookInstancesInput$CreationTimeBefore": "A filter that returns only notebook instances that were created before the specified time (timestamp).
", "ListNotebookInstancesInput$CreationTimeAfter": "A filter that returns only notebook instances that were created after the specified time (timestamp).
", + "ListOptimizationJobsRequest$CreationTimeAfter": "Filters the results to only those optimization jobs that were created after the specified time.
", + "ListOptimizationJobsRequest$CreationTimeBefore": "Filters the results to only those optimization jobs that were created before the specified time.
", "ModelPackage$CreationTime": "The time that the model package was created.
", "ModelPackageGroup$CreationTime": "The time that the model group was created.
", "ModelPackageGroupSummary$CreationTime": "The time that the model group was created.
", "ModelPackageSummary$CreationTime": "A timestamp that shows when the model package was created.
", "NotebookInstanceLifecycleConfigSummary$CreationTime": "A timestamp that tells when the lifecycle configuration was created.
", "NotebookInstanceSummary$CreationTime": "A timestamp that shows when the notebook instance was created.
", + "OptimizationJobSummary$CreationTime": "The time when you created the optimization job.
", "SpaceDetails$CreationTime": "The creation time.
", "UserProfileDetails$CreationTime": "The creation time.
" } @@ -3844,6 +3888,11 @@ "refs": { } }, + "DeleteOptimizationJobRequest": { + "base": null, + "refs": { + } + }, "DeletePipelineRequest": { "base": null, "refs": { @@ -4477,6 +4526,16 @@ "refs": { } }, + "DescribeOptimizationJobRequest": { + "base": null, + "refs": { + } + }, + "DescribeOptimizationJobResponse": { + "base": null, + "refs": { + } + }, "DescribePipelineDefinitionForExecutionRequest": { "base": null, "refs": { @@ -5545,6 +5604,7 @@ "CreateModelCardRequest$ModelCardName": "The unique name of the model card.
", "CreateModelPackageGroupInput$ModelPackageGroupName": "The name of the model group.
", "CreateModelPackageInput$ModelPackageName": "The name of the model package. The name must have 1 to 63 characters. Valid characters are a-z, A-Z, 0-9, and - (hyphen).
This parameter is required for unversioned models. It is not applicable to versioned models.
", + "CreateOptimizationJobRequest$OptimizationJobName": "A custom name for the new optimization job.
", "DeleteAlgorithmInput$AlgorithmName": "The name of the algorithm to delete.
", "DeleteCodeRepositoryInput$CodeRepositoryName": "The name of the Git repository to delete.
", "DeleteCompilationJobRequest$CompilationJobName": "The name of the compilation job to delete.
", @@ -5554,6 +5614,7 @@ "DeleteEdgeDeploymentStageRequest$StageName": "The name of the stage.
", "DeleteModelCardRequest$ModelCardName": "The name of the model card to delete.
", "DeleteModelPackageGroupPolicyInput$ModelPackageGroupName": "The name of the model group for which to delete the policy.
", + "DeleteOptimizationJobRequest$OptimizationJobName": "The name that you assigned to the optimization job.
", "DeploymentStage$StageName": "The name of the stage.
", "DeploymentStageStatusSummary$StageName": "The name of the stage.
", "DeregisterDevicesRequest$DeviceFleetName": "The name of the fleet the devices belong to.
", @@ -5581,6 +5642,8 @@ "DescribeModelPackageGroupOutput$ModelPackageGroupName": "The name of the model group.
", "DescribeModelPackageOutput$ModelPackageName": "The name of the model package being described.
", "DescribeModelPackageOutput$ModelPackageGroupName": "If the model is a versioned model, the name of the model group that the versioned model belongs to.
", + "DescribeOptimizationJobRequest$OptimizationJobName": "The name that you assigned to the optimization job.
", + "DescribeOptimizationJobResponse$OptimizationJobName": "The name that you assigned to the optimization job.
", "DeviceDeploymentSummary$EdgeDeploymentPlanName": "The name of the edge deployment plan.
", "DeviceDeploymentSummary$StageName": "The name of the stage in the edge deployment plan.
", "DeviceDeploymentSummary$DeployedStageName": "The name of the deployed stage.
", @@ -5622,6 +5685,7 @@ "ModelPackageSummary$ModelPackageName": "The name of the model package.
", "ModelPackageSummary$ModelPackageGroupName": "If the model package is a versioned model, the model group that the versioned model belongs to.
", "ModelPackageValidationProfile$ProfileName": "The name of the profile for the model package.
", + "OptimizationJobSummary$OptimizationJobName": "The name that you assigned to the optimization job.
", "PutModelPackageGroupPolicyInput$ModelPackageGroupName": "The name of the model group to add a resource policy to.
", "RegisterDevicesRequest$DeviceFleetName": "The name of the fleet.
", "StartEdgeDeploymentStageRequest$EdgeDeploymentPlanName": "The name of the edge deployment plan to start.
", @@ -5630,6 +5694,7 @@ "StopEdgeDeploymentStageRequest$EdgeDeploymentPlanName": "The name of the edge deployment plan to stop.
", "StopEdgeDeploymentStageRequest$StageName": "The name of the stage to stop.
", "StopEdgePackagingJobRequest$EdgePackagingJobName": "The name of the edge packaging job.
", + "StopOptimizationJobRequest$OptimizationJobName": "The name that you assigned to the optimization job.
", "UpdateCodeRepositoryInput$CodeRepositoryName": "The name of the Git repository to update.
", "UpdateDeviceFleetRequest$DeviceFleetName": "The name of the fleet.
", "UpdateDevicesRequest$DeviceFleetName": "The name of the fleet the devices belong to.
" @@ -5922,6 +5987,7 @@ "DescribeModelCardExportJobResponse$FailureReason": "The failure reason if the model export job fails.
", "DescribeMonitoringScheduleResponse$FailureReason": "A string, up to one KB in size, that contains the reason a monitoring job failed, if it failed.
", "DescribeNotebookInstanceOutput$FailureReason": "If status is Failed
, the reason it failed.
If the optimization job status is FAILED
, the reason for the failure.
A string, up to one KB in size, that contains the reason a processing job failed, if it failed.
", "DescribeSpaceResponse$FailureReason": "The failure reason.
", "DescribeTrainingJobResponse$FailureReason": "If the training job failed, the reason it failed.
", @@ -6130,6 +6196,7 @@ "FeatureStatus": { "base": null, "refs": { + "AmazonQSettings$Status": "Whether Amazon Q has been enabled within the domain.
", "DirectDeploySettings$Status": "Describes whether model deployment permissions are enabled or disabled in the Canvas application.
", "DockerSettings$EnableDockerAccess": "Indicates whether the domain can access Docker.
", "IdentityProviderOAuthSetting$Status": "Describes whether OAuth for a data source is enabled or disabled in the Canvas application.
", @@ -8064,6 +8131,7 @@ "MonitoringClusterConfig$VolumeKmsKeyId": "The Key Management Service (KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the model monitoring job.
", "MonitoringOutputConfig$KmsKeyId": "The Key Management Service (KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption.
", "OnlineStoreSecurityConfig$KmsKeyId": "The Amazon Web Services Key Management Service (KMS) key ARN that SageMaker Feature Store uses to encrypt the Amazon S3 objects at rest using Amazon S3 server-side encryption.
The caller (either user or IAM role) of CreateFeatureGroup
must have below permissions to the OnlineStore
KmsKeyId
:
\"kms:Encrypt\"
\"kms:Decrypt\"
\"kms:DescribeKey\"
\"kms:CreateGrant\"
\"kms:RetireGrant\"
\"kms:ReEncryptFrom\"
\"kms:ReEncryptTo\"
\"kms:GenerateDataKey\"
\"kms:ListAliases\"
\"kms:ListGrants\"
\"kms:RevokeGrant\"
The caller (either user or IAM role) to all DataPlane operations (PutRecord
, GetRecord
, DeleteRecord
) must have the following permissions to the KmsKeyId
:
\"kms:Decrypt\"
The Amazon Resource Name (ARN) of a key in Amazon Web Services KMS. SageMaker uses they key to encrypt the artifacts of the optimized model when SageMaker uploads the model to Amazon S3.
", "OutputConfig$KmsKeyId": "The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker uses to encrypt your output models with Amazon S3 server-side encryption after compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.
The KmsKeyId can be any of the following formats:
Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
Alias name: alias/ExampleAlias
Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias
The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId
can be any of the following formats:
// KMS Key ID
\"1234abcd-12ab-34cd-56ef-1234567890ab\"
// Amazon Resource Name (ARN) of a KMS Key
\"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
// KMS Key Alias
\"alias/ExampleAlias\"
// Amazon Resource Name (ARN) of a KMS Key Alias
\"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"
If you use a KMS key ID or an alias of your KMS key, the SageMaker execution role must include permissions to call kms:Encrypt
. If you don't provide a KMS key ID, SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide. If the output data is stored in Amazon S3 Express One Zone, it is encrypted with server-side encryption with Amazon S3 managed keys (SSE-S3). KMS key is not supported for Amazon S3 Express One Zone
The KMS key policy must grant permission to the IAM role that you specify in your CreateTrainingJob
, CreateTransformJob
, or CreateHyperParameterTuningJob
requests. For more information, see Using Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide.
The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the processing job.
Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a VolumeKmsKeyId
when using an instance type with local storage.
For a list of instance types that support local instance storage, see Instance Store Volumes.
For more information about local instance storage encryption, see SSD Instance Store Volumes.
A timestamp that shows when the job was last modified.
", "DescribeNotebookInstanceLifecycleConfigOutput$LastModifiedTime": "A timestamp that tells when the lifecycle configuration was last modified.
", "DescribeNotebookInstanceOutput$LastModifiedTime": "A timestamp. Use this parameter to retrieve the time when the notebook instance was last modified.
", + "DescribeOptimizationJobResponse$LastModifiedTime": "The time when the optimization job was last updated.
", "DescribeSpaceResponse$LastModifiedTime": "The last modified time.
", "DescribeUserProfileResponse$LastModifiedTime": "The last modified time.
", "DomainDetails$LastModifiedTime": "The last modified time.
", @@ -8289,8 +8358,11 @@ "ListNotebookInstanceLifecycleConfigsInput$LastModifiedTimeAfter": "A filter that returns only lifecycle configurations that were modified after the specified time (timestamp).
", "ListNotebookInstancesInput$LastModifiedTimeBefore": "A filter that returns only notebook instances that were modified before the specified time (timestamp).
", "ListNotebookInstancesInput$LastModifiedTimeAfter": "A filter that returns only notebook instances that were modified after the specified time (timestamp).
", + "ListOptimizationJobsRequest$LastModifiedTimeAfter": "Filters the results to only those optimization jobs that were updated after the specified time.
", + "ListOptimizationJobsRequest$LastModifiedTimeBefore": "Filters the results to only those optimization jobs that were updated before the specified time.
", "NotebookInstanceLifecycleConfigSummary$LastModifiedTime": "A timestamp that tells when the lifecycle configuration was last modified.
", "NotebookInstanceSummary$LastModifiedTime": "A timestamp that shows when the notebook instance was last modified.
", + "OptimizationJobSummary$LastModifiedTime": "The time when the optimization job was last updated.
", "SpaceDetails$LastModifiedTime": "The last modified time.
", "UserProfileDetails$LastModifiedTime": "The last modified time.
" } @@ -8981,6 +9053,22 @@ "refs": { } }, + "ListOptimizationJobsRequest": { + "base": null, + "refs": { + } + }, + "ListOptimizationJobsResponse": { + "base": null, + "refs": { + } + }, + "ListOptimizationJobsSortBy": { + "base": null, + "refs": { + "ListOptimizationJobsRequest$SortBy": "The field by which to sort the optimization jobs in the response. The default is CreationTime
The maximum number of jobs to return in the response. The default value is 10.
", "ListNotebookInstanceLifecycleConfigsInput$MaxResults": "The maximum number of lifecycle configurations to return in the response.
", "ListNotebookInstancesInput$MaxResults": "The maximum number of notebook instances to return.
", + "ListOptimizationJobsRequest$MaxResults": "The maximum number of optimization jobs to return in the response. The default is 50.
", "ListPipelineExecutionStepsRequest$MaxResults": "The maximum number of pipeline execution steps to return in the response.
", "ListPipelineExecutionsRequest$MaxResults": "The maximum number of pipeline executions to return in the response.
", "ListPipelineParametersForExecutionRequest$MaxResults": "The maximum number of parameters to return in the response.
", @@ -9850,6 +9939,12 @@ "TransformJob$ModelClientConfig": null } }, + "ModelCompilationConfig": { + "base": "Settings for the model compilation technique that's applied by a model optimization job.
", + "refs": { + "OptimizationConfig$ModelCompilationConfig": "Settings for the model compilation technique that's applied by a model optimization job.
" + } + }, "ModelCompressionType": { "base": null, "refs": { @@ -10305,6 +10400,12 @@ "DescribeModelQualityJobDefinitionResponse$ModelQualityJobInput": "Inputs for the model quality job.
" } }, + "ModelQuantizationConfig": { + "base": "Settings for the model quantization technique that's applied by a model optimization job.
", + "refs": { + "OptimizationConfig$ModelQuantizationConfig": "Settings for the model quantization technique that's applied by a model optimization job.
" + } + }, "ModelRegisterSettings": { "base": "The model registry settings for the SageMaker Canvas application.
", "refs": { @@ -10877,6 +10978,8 @@ "ListModelPackagesInput$NameContains": "A string in the model package name. This filter returns only model packages whose name contains the specified string.
", "ListModelQualityJobDefinitionsRequest$NameContains": "A string in the transform job name. This filter returns only model quality monitoring job definitions whose name contains the specified string.
", "ListMonitoringSchedulesRequest$NameContains": "Filter for monitoring schedules whose name contains a specified string.
", + "ListOptimizationJobsRequest$OptimizationContains": "Filters the results to only those optimization jobs that apply the specified optimization techniques. You can specify either Quantization
or Compilation
.
Filters the results to only those optimization jobs with a name that contains the specified string.
", "ListTrainingJobsRequest$NameContains": "A string in the training job name. This filter returns only training jobs whose name contains the specified string.
", "ListTransformJobsRequest$NameContains": "A string in the transform job name. This filter returns only transform jobs whose name contains the specified string.
" } @@ -11052,6 +11155,8 @@ "ListNotebookInstanceLifecycleConfigsOutput$NextToken": "If the response is truncated, SageMaker returns this token. To get the next set of lifecycle configurations, use it in the next request.
", "ListNotebookInstancesInput$NextToken": " If the previous call to the ListNotebookInstances
is truncated, the response includes a NextToken
. You can use this token in your subsequent ListNotebookInstances
request to fetch the next set of notebook instances.
You might specify a filter or a sort order in your request. When response is truncated, you must use the same values for the filer and sort order in the next request.
If the response to the previous ListNotebookInstances
request was truncated, SageMaker returns this token. To retrieve the next set of notebook instances, use the token in the next request.
A token that you use to get the next set of results following a truncated response. If the response to the previous request was truncated, that response provides the value for this token.
", + "ListOptimizationJobsResponse$NextToken": "The token to use in a subsequent request to get the next set of results following a truncated response.
", "ListPipelineExecutionStepsRequest$NextToken": "If the result of the previous ListPipelineExecutionSteps
request was truncated, the response includes a NextToken
. To retrieve the next set of pipeline execution steps, use the token in the next request.
If the result of the previous ListPipelineExecutionSteps
request was truncated, the response includes a NextToken
. To retrieve the next set of pipeline execution steps, use the token in the next request.
If the result of the previous ListPipelineExecutions
request was truncated, the response includes a NextToken
. To retrieve the next set of pipeline executions, use the token in the next request.
A Boolean binary operator that is used to evaluate the filter. The operator field contains one of the following values:
The value of Name
equals Value
.
The value of Name
doesn't equal Value
.
The Name
property exists.
The Name
property does not exist.
The value of Name
is greater than Value
. Not supported for text properties.
The value of Name
is greater than or equal to Value
. Not supported for text properties.
The value of Name
is less than Value
. Not supported for text properties.
The value of Name
is less than or equal to Value
. Not supported for text properties.
The value of Name
is one of the comma delimited strings in Value
. Only supported for text properties.
The value of Name
contains the string Value
. Only supported for text properties.
A SearchExpression
can include the Contains
operator multiple times when the value of Name
is one of the following:
Experiment.DisplayName
Experiment.ExperimentName
Experiment.Tags
Trial.DisplayName
Trial.TrialName
Trial.Tags
TrialComponent.DisplayName
TrialComponent.TrialComponentName
TrialComponent.Tags
TrialComponent.InputArtifacts
TrialComponent.OutputArtifacts
A SearchExpression
can include only one Contains
operator for all other values of Name
. In these cases, if you include multiple Contains
operators in the SearchExpression
, the result is the following error message: \"'CONTAINS' operator usage limit of 1 exceeded.
\"
Settings for an optimization technique that you apply with a model optimization job.
", + "refs": { + "OptimizationConfigs$member": null + } + }, + "OptimizationConfigs": { + "base": null, + "refs": { + "CreateOptimizationJobRequest$OptimizationConfigs": "Settings for each of the optimization techniques that the job applies.
", + "DescribeOptimizationJobResponse$OptimizationConfigs": "Settings for each of the optimization techniques that the job applies.
" + } + }, + "OptimizationContainerImage": { + "base": null, + "refs": { + "ModelCompilationConfig$Image": "The URI of an LMI DLC in Amazon ECR. SageMaker uses this image to run the optimization.
", + "ModelQuantizationConfig$Image": "The URI of an LMI DLC in Amazon ECR. SageMaker uses this image to run the optimization.
", + "OptimizationOutput$RecommendedInferenceImage": "The image that SageMaker recommends that you use to host the optimized model that you created with an optimization job.
" + } + }, + "OptimizationJobArn": { + "base": null, + "refs": { + "CreateOptimizationJobResponse$OptimizationJobArn": "The Amazon Resource Name (ARN) of the optimization job.
", + "DescribeOptimizationJobResponse$OptimizationJobArn": "The Amazon Resource Name (ARN) of the optimization job.
", + "OptimizationJobSummary$OptimizationJobArn": "The Amazon Resource Name (ARN) of the optimization job.
" + } + }, + "OptimizationJobDeploymentInstanceType": { + "base": null, + "refs": { + "CreateOptimizationJobRequest$DeploymentInstanceType": "The type of instance that hosts the optimized model that you create with the optimization job.
", + "DescribeOptimizationJobResponse$DeploymentInstanceType": "The type of instance that hosts the optimized model that you create with the optimization job.
", + "OptimizationJobSummary$DeploymentInstanceType": "The type of instance that hosts the optimized model that you create with the optimization job.
" + } + }, + "OptimizationJobEnvironmentVariables": { + "base": null, + "refs": { + "CreateOptimizationJobRequest$OptimizationEnvironment": "The environment variables to set in the model container.
", + "DescribeOptimizationJobResponse$OptimizationEnvironment": "The environment variables to set in the model container.
", + "ModelCompilationConfig$OverrideEnvironment": "Environment variables that override the default ones in the model container.
", + "ModelQuantizationConfig$OverrideEnvironment": "Environment variables that override the default ones in the model container.
" + } + }, + "OptimizationJobModelSource": { + "base": "The location of the source model to optimize with an optimization job.
", + "refs": { + "CreateOptimizationJobRequest$ModelSource": "The location of the source model to optimize with an optimization job.
", + "DescribeOptimizationJobResponse$ModelSource": "The location of the source model to optimize with an optimization job.
" + } + }, + "OptimizationJobModelSourceS3": { + "base": "The Amazon S3 location of a source model to optimize with an optimization job.
", + "refs": { + "OptimizationJobModelSource$S3": "The Amazon S3 location of a source model to optimize with an optimization job.
" + } + }, + "OptimizationJobOutputConfig": { + "base": "Details for where to store the optimized model that you create with the optimization job.
", + "refs": { + "CreateOptimizationJobRequest$OutputConfig": "Details for where to store the optimized model that you create with the optimization job.
", + "DescribeOptimizationJobResponse$OutputConfig": "Details for where to store the optimized model that you create with the optimization job.
" + } + }, + "OptimizationJobStatus": { + "base": null, + "refs": { + "DescribeOptimizationJobResponse$OptimizationJobStatus": "The current status of the optimization job.
", + "ListOptimizationJobsRequest$StatusEquals": "Filters the results to only those optimization jobs with the specified status.
", + "OptimizationJobSummary$OptimizationJobStatus": "The current status of the optimization job.
" + } + }, + "OptimizationJobSummaries": { + "base": null, + "refs": { + "ListOptimizationJobsResponse$OptimizationJobSummaries": "A list of optimization jobs and their properties that matches any of the filters you specified in the request.
" + } + }, + "OptimizationJobSummary": { + "base": "Summarizes an optimization job by providing some of its key properties.
", + "refs": { + "OptimizationJobSummaries$member": null + } + }, + "OptimizationModelAcceptEula": { + "base": null, + "refs": { + "OptimizationModelAccessConfig$AcceptEula": "Specifies agreement to the model end-user license agreement (EULA). The AcceptEula
value must be explicitly defined as True
in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
The access configuration settings for the source ML model for an optimization job, where you can accept the model end-user license agreement (EULA).
", + "refs": { + "OptimizationJobModelSourceS3$ModelAccessConfig": "The access configuration settings for the source ML model for an optimization job, where you can accept the model end-user license agreement (EULA).
" + } + }, + "OptimizationOutput": { + "base": "Output values produced by an optimization job.
", + "refs": { + "DescribeOptimizationJobResponse$OptimizationOutput": "Output values produced by an optimization job.
" + } + }, + "OptimizationType": { + "base": null, + "refs": { + "OptimizationTypes$member": null + } + }, + "OptimizationTypes": { + "base": null, + "refs": { + "OptimizationJobSummary$OptimizationTypes": "The optimization techniques that are applied by the optimization job.
" + } + }, + "OptimizationVpcConfig": { + "base": "A VPC in Amazon VPC that's accessible to an optimized that you create with an optimization job. You can control access to and from your resources by configuring a VPC. For more information, see Give SageMaker Access to Resources in your Amazon VPC.
", + "refs": { + "CreateOptimizationJobRequest$VpcConfig": "A VPC in Amazon VPC that your optimized model has access to.
", + "DescribeOptimizationJobResponse$VpcConfig": "A VPC in Amazon VPC that your optimized model has access to.
" + } + }, + "OptimizationVpcSecurityGroupId": { + "base": null, + "refs": { + "OptimizationVpcSecurityGroupIds$member": null + } + }, + "OptimizationVpcSecurityGroupIds": { + "base": null, + "refs": { + "OptimizationVpcConfig$SecurityGroupIds": "The VPC security group IDs, in the form sg-xxxxxxxx
. Specify the security groups for the VPC that is specified in the Subnets
field.
The ID of the subnets in the VPC to which you want to connect your optimized model.
" + } + }, "OptionalDouble": { "base": null, "refs": { @@ -12518,6 +12771,12 @@ "refs": { } }, + "QProfileArn": { + "base": null, + "refs": { + "AmazonQSettings$QProfileArn": "The ARN of the Amazon Q profile used within the domain.
" + } + }, "QualityCheckStepMetadata": { "base": "Container for the metadata for a Quality check step. For more information, see the topic on QualityCheck step in the Amazon SageMaker Developer Guide.
", "refs": { @@ -13205,6 +13464,7 @@ "CreateModelInput$ExecutionRoleArn": "The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access model artifacts and docker image for deployment on ML compute instances or for batch transform jobs. Deploying on ML compute instances is part of model hosting. For more information, see SageMaker Roles.
To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole
permission.
The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.
", "CreateNotebookInstanceInput$RoleArn": "When you send any requests to Amazon Web Services resources from the notebook instance, SageMaker assumes this role to perform tasks on your behalf. You must grant this role necessary permissions so SageMaker can perform these tasks. The policy must allow the SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For more information, see SageMaker Roles.
To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole
permission.
The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf.
During model optimization, Amazon SageMaker needs your permission to:
Read input data from an S3 bucket
Write model artifacts to an S3 bucket
Write logs to Amazon CloudWatch Logs
Publish metrics to Amazon CloudWatch
You grant permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this API must have the iam:PassRole
permission. For more information, see Amazon SageMaker Roles.
The Amazon Resource Name (ARN) of the role used by the pipeline to access and create resources.
", "CreateProcessingJobRequest$RoleArn": "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.
", "CreateTrainingJobRequest$RoleArn": "The Amazon Resource Name (ARN) of an IAM role that SageMaker can assume to perform tasks on your behalf.
During model training, SageMaker needs your permission to read input data from an S3 bucket, download a Docker image that contains training code, write model artifacts to an S3 bucket, write logs to Amazon CloudWatch Logs, and publish metrics to Amazon CloudWatch. You grant permissions for all of these tasks to an IAM role. For more information, see SageMaker Roles.
To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole
permission.
The Amazon Resource Name (ARN) of the IAM role that you specified for the model.
", "DescribeModelQualityJobDefinitionResponse$RoleArn": "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.
", "DescribeNotebookInstanceOutput$RoleArn": "The Amazon Resource Name (ARN) of the IAM role associated with the instance.
", + "DescribeOptimizationJobResponse$RoleArn": "The ARN of the IAM role that you assigned to the optimization job.
", "DescribePipelineResponse$RoleArn": "The Amazon Resource Name (ARN) that the pipeline uses to execute.
", "DescribeProcessingJobResponse$RoleArn": "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.
", "DescribeTrainingJobResponse$RoleArn": "The Amazon Web Services Identity and Access Management (IAM) role configured for the training job.
", @@ -13321,6 +13582,7 @@ "S3ModelDataSource": { "base": "Specifies the S3 location of ML model data to deploy.
", "refs": { + "AdditionalModelDataSource$S3DataSource": null, "ModelDataSource$S3DataSource": "Specifies the S3 location of ML model data to deploy.
" } }, @@ -13395,6 +13657,8 @@ "MonitoringAppSpecification$PostAnalyticsProcessorSourceUri": "An Amazon S3 URI to a script that is called after analysis has been performed. Applicable only for the built-in (first party) containers.
", "MonitoringConstraintsResource$S3Uri": "The Amazon S3 URI for the constraints resource.
", "MonitoringStatisticsResource$S3Uri": "The Amazon S3 URI for the statistics resource.
", + "OptimizationJobModelSourceS3$S3Uri": "An Amazon S3 URI that locates a source model to optimize with an optimization job.
", + "OptimizationJobOutputConfig$S3OutputLocation": "The Amazon S3 URI for where to store the optimized model that you create with an optimization job.
", "OutputConfig$S3OutputLocation": "Identifies the S3 bucket where you want Amazon SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix
.
Identifies the S3 path where you want SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix
.
The URI of the Amazon S3 prefix Amazon SageMaker downloads data required to run a processing job.
", @@ -13883,6 +14147,7 @@ "ListMonitoringAlertHistoryRequest$SortOrder": "The sort order, whether Ascending
or Descending
, of the alert history. The default is Descending
.
Whether to sort the results in Ascending
or Descending
order. The default is Descending
.
Whether to sort the results in Ascending
or Descending
order. The default is Descending
.
The sort order for results. The default is Ascending
The field by which to sort results. The default is CreatedTime
.
The sort order for results.
", "ListPipelinesRequest$SortOrder": "The sort order for results.
", @@ -14259,6 +14524,11 @@ "refs": { } }, + "StopOptimizationJobRequest": { + "base": null, + "refs": { + } + }, "StopPipelineExecutionRequest": { "base": null, "refs": { @@ -14285,11 +14555,13 @@ } }, "StoppingCondition": { - "base": "Specifies a limit to how long a model training job or model compilation job can run. It also specifies how long a managed spot training job has to complete. When the job reaches the time limit, SageMaker ends the training or compilation job. Use this API to cap model training costs.
To stop a training job, SageMaker sends the algorithm the SIGTERM
signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.
The training algorithms provided by SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best-effort case, as the model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel
.
The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.
Specifies a limit to how long a job can run. When the job reaches the time limit, SageMaker ends the job. Use this API to cap costs.
To stop a training job, SageMaker sends the algorithm the SIGTERM
signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.
The training algorithms provided by SageMaker automatically save the intermediate results of a model training job when possible. This attempt to save artifacts is only a best-effort case, as the model might not be in a state from which it can be saved. For example, if training has just started, the model might not be ready to save. When saved, this intermediate data is a valid model artifact. You can use it to create a model with CreateModel
.
The Neural Topic Model (NTM) currently does not support saving intermediate model artifacts. When training NTMs, make sure that the maximum runtime is sufficient for the training job to complete.
Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training costs.
", + "CreateOptimizationJobRequest$StoppingCondition": null, "CreateTrainingJobRequest$StoppingCondition": "Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training job has to complete. When the job reaches the time limit, SageMaker ends the training job. Use this API to cap model training costs.
To stop a job, SageMaker sends the algorithm the SIGTERM
signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.
Specifies a limit to how long a model compilation job can run. When the job reaches the time limit, Amazon SageMaker ends the compilation job. Use this API to cap model training costs.
", + "DescribeOptimizationJobResponse$StoppingCondition": null, "DescribeTrainingJobResponse$StoppingCondition": "Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training job has to complete. When the job reaches the time limit, SageMaker ends the training job. Use this API to cap model training costs.
To stop a job, SageMaker sends the algorithm the SIGTERM
signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.
Specifies a limit to how long a model hyperparameter training job can run. It also specifies how long a managed spot training job has to complete. When the job reaches the time limit, SageMaker ends the training job. Use this API to cap model training costs.
", "TrainingJob$StoppingCondition": "Specifies a limit to how long a model training job can run. It also specifies how long a managed Spot training job has to complete. When the job reaches the time limit, SageMaker ends the training job. Use this API to cap model training costs.
To stop a job, SageMaker sends the algorithm the SIGTERM
signal, which delays job termination for 120 seconds. Algorithms can use this 120-second window to save the model artifacts, so the results of training are not lost.
A filter that returns only components that have the specified source Amazon Resource Name (ARN). If you specify SourceArn
, you can't filter by ExperimentName
or TrialName
.
The value to filter the model metadata.
", "ModelStepMetadata$Arn": "The Amazon Resource Name (ARN) of the created model.
", + "OptimizationJobEnvironmentVariables$value": null, "OutputParameter$Name": "The name of the output parameter.
", "QualityCheckStepMetadata$CheckType": "The type of the Quality check step.
", "QualityCheckStepMetadata$ModelPackageGroupName": "The model package group name.
", @@ -14708,6 +14981,7 @@ "CreateModelQualityJobDefinitionRequest$Tags": "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.
", "CreateMonitoringScheduleRequest$Tags": "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.
", "CreateNotebookInstanceInput$Tags": "An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or environment. For more information, see Tagging Amazon Web Services Resources.
", + "CreateOptimizationJobRequest$Tags": "A list of key-value pairs associated with the optimization job. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.
", "CreatePipelineRequest$Tags": "A list of tags to apply to the created pipeline.
", "CreateProcessingJobRequest$Tags": "(Optional) An array of key-value pairs. For more information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide.
", "CreateProjectInput$Tags": "An array of key-value pairs that you want to use to organize and track your Amazon Web Services resource costs. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.
", @@ -15097,6 +15371,8 @@ "DescribeModelQualityJobDefinitionResponse$CreationTime": "The time at which the model quality job was created.
", "DescribeMonitoringScheduleResponse$CreationTime": "The time at which the monitoring job was created.
", "DescribeMonitoringScheduleResponse$LastModifiedTime": "The time at which the monitoring job was last modified.
", + "DescribeOptimizationJobResponse$OptimizationStartTime": "The time when the optimization job started.
", + "DescribeOptimizationJobResponse$OptimizationEndTime": "The time when the optimization job finished processing.
", "DescribePipelineDefinitionForExecutionResponse$CreationTime": "The time when the pipeline was created.
", "DescribePipelineExecutionResponse$CreationTime": "The time when the pipeline execution was created.
", "DescribePipelineExecutionResponse$LastModifiedTime": "The time when the pipeline execution was modified last.
", @@ -15350,6 +15626,8 @@ "MonitoringSchedule$LastModifiedTime": "The last time the monitoring schedule was changed.
", "MonitoringScheduleSummary$CreationTime": "The creation time of the monitoring schedule.
", "MonitoringScheduleSummary$LastModifiedTime": "The last time the monitoring schedule was modified.
", + "OptimizationJobSummary$OptimizationStartTime": "The time when the optimization job started.
", + "OptimizationJobSummary$OptimizationEndTime": "The time when the optimization job finished processing.
", "PendingDeploymentSummary$StartTime": "The start time of the deployment.
", "Pipeline$CreationTime": "The creation time of the pipeline.
", "Pipeline$LastModifiedTime": "The time that the pipeline was last modified.
", diff --git a/models/apis/sagemaker/2017-07-24/paginators-1.json b/models/apis/sagemaker/2017-07-24/paginators-1.json index 89823b522f3..7c20db79961 100644 --- a/models/apis/sagemaker/2017-07-24/paginators-1.json +++ b/models/apis/sagemaker/2017-07-24/paginators-1.json @@ -318,6 +318,12 @@ "limit_key": "MaxResults", "result_key": "NotebookInstances" }, + "ListOptimizationJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "OptimizationJobSummaries" + }, "ListPipelineExecutionSteps": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 312484c9c10..56460f6da0f 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -28523,31 +28523,17 @@ }, "redshift" : { "endpoints" : { - "fips-us-iso-east-1" : { + "us-iso-east-1" : { "credentialScope" : { "region" : "us-iso-east-1" }, - "deprecated" : true, - "hostname" : "redshift-fips.us-iso-east-1.c2s.ic.gov" + "hostname" : "redshift.us-iso-east-1.c2s.ic.gov" }, - "fips-us-iso-west-1" : { + "us-iso-west-1" : { "credentialScope" : { "region" : "us-iso-west-1" }, - "deprecated" : true, - "hostname" : "redshift-fips.us-iso-west-1.c2s.ic.gov" - }, - "us-iso-east-1" : { - "variants" : [ { - "hostname" : "redshift-fips.us-iso-east-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] - }, - "us-iso-west-1" : { - "variants" : [ { - "hostname" : "redshift-fips.us-iso-west-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] + "hostname" : "redshift.us-iso-west-1.c2s.ic.gov" } } }, @@ -29211,18 +29197,11 @@ }, "redshift" : { "endpoints" : { - "fips-us-isob-east-1" : { + "us-isob-east-1" : { "credentialScope" : { "region" : "us-isob-east-1" }, - "deprecated" : true, - "hostname" : "redshift-fips.us-isob-east-1.sc2s.sgov.gov" - }, - "us-isob-east-1" : { - "variants" : [ { - "hostname" : "redshift-fips.us-isob-east-1.sc2s.sgov.gov", - "tags" : [ "fips" ] - } ] + "hostname" : "redshift.us-isob-east-1.sc2s.sgov.gov" } } }, diff --git a/service/datazone/api.go b/service/datazone/api.go index a22093a9030..6936bb12a89 100644 --- a/service/datazone/api.go +++ b/service/datazone/api.go @@ -20075,8 +20075,9 @@ func (s *CustomParameter) SetKeyName(v string) *CustomParameter { return s } +// Deprecated: This structure is deprecated. type DataProductItem struct { - _ struct{} `type:"structure"` + _ struct{} `deprecated:"true" type:"structure"` DomainId *string `locationName:"domainId" type:"string"` @@ -20113,14 +20114,15 @@ func (s *DataProductItem) SetItemId(v string) *DataProductItem { return s } +// Deprecated: This structure is deprecated. type DataProductSummary struct { - _ struct{} `type:"structure"` + _ struct{} `deprecated:"true" type:"structure"` CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` CreatedBy *string `locationName:"createdBy" type:"string"` - DataProductItems []*DataProductItem `locationName:"dataProductItems" type:"list"` + DataProductItems []*DataProductItem `locationName:"dataProductItems" deprecated:"true" type:"list"` // Description is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by DataProductSummary's @@ -37613,7 +37615,9 @@ type SearchInventoryResultItem struct { AssetItem *AssetItem `locationName:"assetItem" type:"structure"` // The data product item included in the search results. - DataProductItem *DataProductSummary `locationName:"dataProductItem" type:"structure"` + // + // Deprecated: This field is deprecated. 
+ DataProductItem *DataProductSummary `locationName:"dataProductItem" deprecated:"true" type:"structure"` // The glossary item included in the search results. GlossaryItem *GlossaryItem `locationName:"glossaryItem" type:"structure"` diff --git a/service/fsx/api.go b/service/fsx/api.go index ebf0c81d1e5..cbeac87afef 100644 --- a/service/fsx/api.go +++ b/service/fsx/api.go @@ -5511,7 +5511,7 @@ type AdministrativeAction struct { // in the Amazon FSx for Windows File Server User Guide. // // * STORAGE_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase - // a file system's storage capacity has been completed successfully, a STORAGE_OPTIMIZATION + // a file system's storage capacity has completed successfully, a STORAGE_OPTIMIZATION // task starts. For Windows and ONTAP, storage optimization is the process // of migrating the file system data to newer larger disks. For Lustre, storage // optimization consists of rebalancing the data across the existing and @@ -5561,6 +5561,11 @@ type AdministrativeAction struct { // * RELEASE_NFS_V3_LOCKS - Tracks the release of Network File System (NFS) // V3 locks on an Amazon FSx for OpenZFS file system. // + // * DOWNLOAD_DATA_FROM_BACKUP - An FSx for ONTAP backup is being restored + // to a new volume on a second-generation file system. Once the all the file + // metadata is loaded onto the volume, you can mount the volume with read-only + // access. during this process. + // // * VOLUME_INITIALIZE_WITH_SNAPSHOT - A volume is being created from a snapshot // on a different FSx for OpenZFS file system. You can initiate this from // the Amazon FSx console, API (CreateVolume), or CLI (create-volume) when @@ -5574,8 +5579,9 @@ type AdministrativeAction struct { // Provides information about a failed administrative action. FailureDetails *AdministrativeActionFailureDetails `type:"structure"` - // The percentage-complete status of a STORAGE_OPTIMIZATION administrative action. - // Does not apply to any other administrative action type. + // The percentage-complete status of a STORAGE_OPTIMIZATION or DOWNLOAD_DATA_FROM_BACKUP + // administrative action. Does not apply to any other administrative action + // type. ProgressPercent *int64 `type:"integer"` // The remaining bytes to transfer for the FSx for OpenZFS snapshot that you're @@ -5594,10 +5600,23 @@ type AdministrativeAction struct { // * PENDING - Amazon FSx is waiting to process the administrative action. // // * COMPLETED - Amazon FSx has finished processing the administrative task. + // For a backup restore to a second-generation FSx for ONTAP file system, + // indicates that all data has been downloaded to the volume, and clients + // now have read-write access to volume. // // * UPDATED_OPTIMIZING - For a storage-capacity increase update, Amazon // FSx has updated the file system with the new storage capacity, and is // now performing the storage-optimization process. + // + // * PENDING - For a backup restore to a second-generation FSx for ONTAP + // file system, indicates that the file metadata is being downloaded onto + // the volume. The volume's Lifecycle state is CREATING. + // + // * IN_PROGRESS - For a backup restore to a second-generation FSx for ONTAP + // file system, indicates that all metadata has been downloaded to the new + // volume and client can access data with read-only access while Amazon FSx + // downloads the file data to the volume. Track the progress of this process + // with the ProgressPercent element. 
Status *string `type:"string" enum:"Status"` // The target value for the administration action, provided in the UpdateFileSystem @@ -5740,7 +5759,7 @@ type AggregateConfiguration struct { // conditions: // // * The strings in the value of Aggregates are not are not formatted as - // aggrX, where X is a number between 1 and 6. + // aggrX, where X is a number between 1 and 12. // // * The value of Aggregates contains aggregates that are not present. // @@ -9316,13 +9335,20 @@ type CreateFileSystemOntapConfiguration struct { // Specifies the FSx for ONTAP file system deployment type to use in creating // the file system. // - // * MULTI_AZ_1 - (Default) A high availability file system configured for - // Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. + // * MULTI_AZ_1 - A high availability file system configured for Multi-AZ + // redundancy to tolerate temporary Availability Zone (AZ) unavailability. + // This is a first-generation FSx for ONTAP file system. // - // * SINGLE_AZ_1 - A file system configured for Single-AZ redundancy. + // * MULTI_AZ_2 - A high availability file system configured for Multi-AZ + // redundancy to tolerate temporary AZ unavailability. This is a second-generation + // FSx for ONTAP file system. + // + // * SINGLE_AZ_1 - A file system configured for Single-AZ redundancy. This + // is a first-generation FSx for ONTAP file system. // // * SINGLE_AZ_2 - A file system configured with multiple high-availability - // (HA) pairs for Single-AZ redundancy. + // (HA) pairs for Single-AZ redundancy. This is a second-generation FSx for + // ONTAP file system. // // For information about the use cases for Multi-AZ and Single-AZ deployments, // refer to Choosing a file system deployment type (https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/high-availability-AZ.html). @@ -9352,12 +9378,15 @@ type CreateFileSystemOntapConfiguration struct { FsxAdminPassword *string `min:"8" type:"string" sensitive:"true"` // Specifies how many high-availability (HA) pairs of file servers will power - // your file system. Scale-up file systems are powered by 1 HA pair. The default - // value is 1. FSx for ONTAP scale-out file systems are powered by up to 12 - // HA pairs. The value of this property affects the values of StorageCapacity, + // your file system. First-generation file systems are powered by 1 HA pair. + // Second-generation multi-AZ file systems are powered by 1 HA pair. Second + // generation single-AZ file systems are powered by up to 12 HA pairs. The default + // value is 1. The value of this property affects the values of StorageCapacity, // Iops, and ThroughputCapacity. For more information, see High-availability // (HA) pairs (https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/administering-file-systems.html#HA-pairs) - // in the FSx for ONTAP user guide. + // in the FSx for ONTAP user guide. Block storage protocol support (iSCSI and + // NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For + // more information, see Using block storage protocols (https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/supported-fsx-clients.html#using-block-storage). // // Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following // conditions: @@ -9365,11 +9394,11 @@ type CreateFileSystemOntapConfiguration struct { // * The value of HAPairs is less than 1 or greater than 12. // // * The value of HAPairs is greater than 1 and the value of DeploymentType - // is SINGLE_AZ_1 or MULTI_AZ_1. 
+ // is SINGLE_AZ_1, MULTI_AZ_1, or MULTI_AZ_2. HAPairs *int64 `min:"1" type:"integer"` - // Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet - // in which you want the preferred file server to be located. + // Required when DeploymentType is set to MULTI_AZ_1 or MULTI_AZ_2. This specifies + // the subnet in which you want the preferred file server to be located. PreferredSubnetId *string `min:"15" type:"string"` // (Multi-AZ only) Specifies the route tables in which Amazon FSx creates the @@ -9406,13 +9435,15 @@ type CreateFileSystemOntapConfiguration struct { // You can define either the ThroughputCapacityPerHAPair or the ThroughputCapacity // when creating a file system, but not both. // - // This field and ThroughputCapacity are the same for scale-up file systems - // powered by one HA pair. + // This field and ThroughputCapacity are the same for file systems powered by + // one HA pair. // // * For SINGLE_AZ_1 and MULTI_AZ_1 file systems, valid values are 128, 256, // 512, 1024, 2048, or 4096 MBps. // - // * For SINGLE_AZ_2 file systems, valid values are 3072 or 6144 MBps. + // * For SINGLE_AZ_2, valid values are 1536, 3072, or 6144 MBps. + // + // * For MULTI_AZ_2, valid values are 384, 768, 1536, 3072, or 6144 MBps. // // Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following // conditions: @@ -9421,8 +9452,8 @@ type CreateFileSystemOntapConfiguration struct { // not the same value for file systems with one HA pair. // // * The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / - // ThroughputCapacityPerHAPair is a valid HA pair (a value between 2 and - // 12). + // ThroughputCapacityPerHAPair is not a valid HA pair (a value between 1 + // and 12). // // * The value of ThroughputCapacityPerHAPair is not a valid value. ThroughputCapacityPerHAPair *int64 `min:"128" type:"integer"` @@ -9599,28 +9630,31 @@ type CreateFileSystemOpenZFSConfiguration struct { // 05:00 specifies 5 AM daily. DailyAutomaticBackupStartTime *string `min:"5" type:"string"` - // Specifies the file system deployment type. Single AZ deployment types are - // configured for redundancy within a single Availability Zone in an Amazon - // Web Services Region . Valid values are the following: + // Specifies the file system deployment type. Valid values are the following: // - // * MULTI_AZ_1- Creates file systems with high availability that are configured - // for Multi-AZ redundancy to tolerate temporary unavailability in Availability - // Zones (AZs). Multi_AZ_1 is available only in the US East (N. Virginia), - // US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific - // (Tokyo), and Europe (Ireland) Amazon Web Services Regions. + // * MULTI_AZ_1- Creates file systems with high availability and durability + // by replicating your data and supporting failover across multiple Availability + // Zones in the same Amazon Web Services Region. // - // * SINGLE_AZ_1- Creates file systems with throughput capacities of 64 - - // 4,096 MB/s. Single_AZ_1 is available in all Amazon Web Services Regions - // where Amazon FSx for OpenZFS is available. + // * SINGLE_AZ_HA_2- Creates file systems with high availability and throughput + // capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache by deploying + // a primary and standby file system within the same Availability Zone. 
+	//
+	// * SINGLE_AZ_HA_1- Creates file systems with high availability and throughput
+	// capacities of 64 - 4,096 MB/s by deploying a primary and standby file
+	// system within the same Availability Zone.
 	//
 	// * SINGLE_AZ_2- Creates file systems with throughput capacities of 160
-	// - 10,240 MB/s using an NVMe L2ARC cache. Single_AZ_2 is available only
-	// in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific
-	// (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) Amazon Web Services
-	// Regions.
+	// - 10,240 MB/s using an NVMe L2ARC cache that automatically recover within
+	// a single Availability Zone.
 	//
-	// For more information, see Deployment type availability (https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/availability-durability.html#available-aws-regions)
-	// and File system performance (https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#zfs-fs-performance)
+	// * SINGLE_AZ_1- Creates file systems with throughput capacities of 64 -
+	// 4,096 MB/s that automatically recover within a single Availability Zone.
+	//
+	// For a list of which Amazon Web Services Regions each deployment type is available
+	// in, see Deployment type availability (https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/availability-durability.html#available-aws-regions).
+	// For more information on the differences in performance between deployment
+	// types, see File system performance (https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#zfs-fs-performance)
 	// in the Amazon FSx for OpenZFS User Guide.
 	//
 	// DeploymentType is a required field
@@ -15530,7 +15564,8 @@ type FileCacheCreating struct {
 	// cache.
 	DataRepositoryAssociationIds []*string `type:"list"`
 
-	// A structure providing details of any failures that occurred.
+	// A structure providing details of any failures that occurred in creating a
+	// cache.
 	FailureDetails *FileCacheFailureDetails `type:"structure"`
 
 	// The system-generated, unique ID of the cache.
@@ -18372,13 +18407,20 @@ type OntapFileSystemConfiguration struct {
 	// Specifies the FSx for ONTAP file system deployment type in use in the file
 	// system.
 	//
-	// * MULTI_AZ_1 - (Default) A high availability file system configured for
-	// Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability.
+	// * MULTI_AZ_1 - A high availability file system configured for Multi-AZ
+	// redundancy to tolerate temporary Availability Zone (AZ) unavailability.
+	// This is a first-generation FSx for ONTAP file system.
+	//
+	// * MULTI_AZ_2 - A high availability file system configured for Multi-AZ
+	// redundancy to tolerate temporary AZ unavailability. This is a second-generation
+	// FSx for ONTAP file system.
 	//
-	// * SINGLE_AZ_1 - A file system configured for Single-AZ redundancy.
+	// * SINGLE_AZ_1 - A file system configured for Single-AZ redundancy. This
+	// is a first-generation FSx for ONTAP file system.
 	//
 	// * SINGLE_AZ_2 - A file system configured with multiple high-availability
-	// (HA) pairs for Single-AZ redundancy.
+	// (HA) pairs for Single-AZ redundancy. This is a second-generation FSx for
+	// ONTAP file system.
 	//
 	// For information about the use cases for Multi-AZ and Single-AZ deployments,
 	// refer to Choosing Multi-AZ or Single-AZ file system deployment (https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/high-availability-multiAZ.html).
@@ -18422,7 +18464,7 @@ type OntapFileSystemConfiguration struct {
 	// * The value of HAPairs is less than 1 or greater than 12.
// // * The value of HAPairs is greater than 1 and the value of DeploymentType - // is SINGLE_AZ_1 or MULTI_AZ_1. + // is SINGLE_AZ_1, MULTI_AZ_1, or MULTI_AZ_2. HAPairs *int64 `min:"1" type:"integer"` // The ID for a subnet. A subnet is a range of IP addresses in your virtual @@ -18448,10 +18490,12 @@ type OntapFileSystemConfiguration struct { // This field and ThroughputCapacity are the same for file systems with one // HA pair. // - // * For SINGLE_AZ_1 and MULTI_AZ_1, valid values are 128, 256, 512, 1024, - // 2048, or 4096 MBps. + // * For SINGLE_AZ_1 and MULTI_AZ_1 file systems, valid values are 128, 256, + // 512, 1024, 2048, or 4096 MBps. // - // * For SINGLE_AZ_2, valid values are 3072 or 6144 MBps. + // * For SINGLE_AZ_2, valid values are 1536, 3072, or 6144 MBps. + // + // * For MULTI_AZ_2, valid values are 384, 768, 1536, 3072, or 6144 MBps. // // Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following // conditions: @@ -18460,8 +18504,8 @@ type OntapFileSystemConfiguration struct { // not the same value. // // * The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity / - // ThroughputCapacityPerHAPair is a valid HA pair (a value between 2 and - // 12). + // ThroughputCapacityPerHAPair is not a valid HA pair (a value between 1 + // and 12). // // * The value of ThroughputCapacityPerHAPair is not a valid value. ThroughputCapacityPerHAPair *int64 `min:"128" type:"integer"` @@ -19047,7 +19091,7 @@ type OpenZFSFileSystemConfiguration struct { DailyAutomaticBackupStartTime *string `min:"5" type:"string"` // Specifies the file-system deployment type. Amazon FSx for OpenZFS supports - // MULTI_AZ_1, SINGLE_AZ_1, and SINGLE_AZ_2. + // MULTI_AZ_1, SINGLE_AZ_HA_2, SINGLE_AZ_HA_1, SINGLE_AZ_2, and SINGLE_AZ_1. DeploymentType *string `type:"string" enum:"OpenZFSDeploymentType"` // The SSD IOPS (input/output operations per second) configuration for an Amazon @@ -22808,6 +22852,16 @@ type UpdateFileSystemOntapConfiguration struct { // String and GoString methods. FsxAdminPassword *string `min:"8" type:"string" sensitive:"true"` + // Use to update the number of high-availability (HA) pairs for a second-generation + // single-AZ file system. If you increase the number of HA pairs for your file + // system, you must specify proportional increases for StorageCapacity, Iops, + // and ThroughputCapacity. For more information, see High-availability (HA) + // pairs (https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/administering-file-systems.html#HA-pairs) + // in the FSx for ONTAP user guide. Block storage protocol support (iSCSI and + // NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For + // more information, see Using block storage protocols (https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/supported-fsx-clients.html#using-block-storage). + HAPairs *int64 `min:"1" type:"integer"` + // (Multi-AZ only) A list of IDs of existing virtual private cloud (VPC) route // tables to disassociate (remove) from your Amazon FSx for NetApp ONTAP file // system. You can use the API operation to retrieve the list of VPC route table @@ -22838,10 +22892,12 @@ type UpdateFileSystemOntapConfiguration struct { // This field and ThroughputCapacity are the same for file systems with one // HA pair. // - // * For SINGLE_AZ_1 and MULTI_AZ_1, valid values are 128, 256, 512, 1024, - // 2048, or 4096 MBps. + // * For SINGLE_AZ_1 and MULTI_AZ_1 file systems, valid values are 128, 256, + // 512, 1024, 2048, or 4096 MBps. 
//
-	// * For SINGLE_AZ_2, valid values are 3072 or 6144 MBps.
+	// * For SINGLE_AZ_2, valid values are 1536, 3072, or 6144 MBps.
+	//
+	// * For MULTI_AZ_2, valid values are 384, 768, 1536, 3072, or 6144 MBps.
 	//
 	// Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following
 	// conditions:
@@ -22850,8 +22906,8 @@ type UpdateFileSystemOntapConfiguration struct {
 	// not the same value for file systems with one HA pair.
 	//
 	// * The value of deployment type is SINGLE_AZ_2 and ThroughputCapacity /
-	// ThroughputCapacityPerHAPair is a valid HA pair (a value between 2 and
-	// 12).
+	// ThroughputCapacityPerHAPair is not a valid HA pair (a value between 1
+	// and 12).
 	//
 	// * The value of ThroughputCapacityPerHAPair is not a valid value.
 	ThroughputCapacityPerHAPair *int64 `min:"128" type:"integer"`
@@ -22896,6 +22952,9 @@ func (s *UpdateFileSystemOntapConfiguration) Validate() error {
 	if s.FsxAdminPassword != nil && len(*s.FsxAdminPassword) < 8 {
 		invalidParams.Add(request.NewErrParamMinLen("FsxAdminPassword", 8))
 	}
+	if s.HAPairs != nil && *s.HAPairs < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("HAPairs", 1))
+	}
 	if s.ThroughputCapacity != nil && *s.ThroughputCapacity < 8 {
 		invalidParams.Add(request.NewErrParamMinValue("ThroughputCapacity", 8))
 	}
@@ -22942,6 +23001,12 @@ func (s *UpdateFileSystemOntapConfiguration) SetFsxAdminPassword(v string) *Upda
 	return s
 }
 
+// SetHAPairs sets the HAPairs field's value.
+func (s *UpdateFileSystemOntapConfiguration) SetHAPairs(v int64) *UpdateFileSystemOntapConfiguration {
+	s.HAPairs = &v
+	return s
+}
+
 // SetRemoveRouteTableIds sets the RemoveRouteTableIds field's value.
 func (s *UpdateFileSystemOntapConfiguration) SetRemoveRouteTableIds(v []*string) *UpdateFileSystemOntapConfiguration {
 	s.RemoveRouteTableIds = v
@@ -24966,7 +25031,7 @@ func ActiveDirectoryErrorType_Values() []string {
 // in the Amazon FSx for Windows File Server User Guide.
 //
 // - STORAGE_OPTIMIZATION - After the FILE_SYSTEM_UPDATE task to increase
-// a file system's storage capacity has been completed successfully, a STORAGE_OPTIMIZATION
+// a file system's storage capacity has completed successfully, a STORAGE_OPTIMIZATION
// task starts. For Windows and ONTAP, storage optimization is the process
// of migrating the file system data to newer larger disks. For Lustre, storage
// optimization consists of rebalancing the data across the existing and
@@ -25016,6 +25081,11 @@ func ActiveDirectoryErrorType_Values() []string {
// - RELEASE_NFS_V3_LOCKS - Tracks the release of Network File System (NFS)
// V3 locks on an Amazon FSx for OpenZFS file system.
//
+// - DOWNLOAD_DATA_FROM_BACKUP - An FSx for ONTAP backup is being restored
+// to a new volume on a second-generation file system. Once all the file
+// metadata is loaded onto the volume, you can mount the volume with read-only
+// access during this process.
+//
// - VOLUME_INITIALIZE_WITH_SNAPSHOT - A volume is being created from a snapshot
// on a different FSx for OpenZFS file system.
You can initiate this from // the Amazon FSx console, API (CreateVolume), or CLI (create-volume) when @@ -25066,6 +25136,9 @@ const ( // AdministrativeActionTypeVolumeInitializeWithSnapshot is a AdministrativeActionType enum value AdministrativeActionTypeVolumeInitializeWithSnapshot = "VOLUME_INITIALIZE_WITH_SNAPSHOT" + + // AdministrativeActionTypeDownloadDataFromBackup is a AdministrativeActionType enum value + AdministrativeActionTypeDownloadDataFromBackup = "DOWNLOAD_DATA_FROM_BACKUP" ) // AdministrativeActionType_Values returns all elements of the AdministrativeActionType enum @@ -25085,6 +25158,7 @@ func AdministrativeActionType_Values() []string { AdministrativeActionTypeMisconfiguredStateRecovery, AdministrativeActionTypeVolumeUpdateWithSnapshot, AdministrativeActionTypeVolumeInitializeWithSnapshot, + AdministrativeActionTypeDownloadDataFromBackup, } } @@ -25739,6 +25813,9 @@ const ( // OntapDeploymentTypeSingleAz2 is a OntapDeploymentType enum value OntapDeploymentTypeSingleAz2 = "SINGLE_AZ_2" + + // OntapDeploymentTypeMultiAz2 is a OntapDeploymentType enum value + OntapDeploymentTypeMultiAz2 = "MULTI_AZ_2" ) // OntapDeploymentType_Values returns all elements of the OntapDeploymentType enum @@ -25747,6 +25824,7 @@ func OntapDeploymentType_Values() []string { OntapDeploymentTypeMultiAz1, OntapDeploymentTypeSingleAz1, OntapDeploymentTypeSingleAz2, + OntapDeploymentTypeMultiAz2, } } @@ -25817,6 +25895,12 @@ const ( // OpenZFSDeploymentTypeSingleAz2 is a OpenZFSDeploymentType enum value OpenZFSDeploymentTypeSingleAz2 = "SINGLE_AZ_2" + // OpenZFSDeploymentTypeSingleAzHa1 is a OpenZFSDeploymentType enum value + OpenZFSDeploymentTypeSingleAzHa1 = "SINGLE_AZ_HA_1" + + // OpenZFSDeploymentTypeSingleAzHa2 is a OpenZFSDeploymentType enum value + OpenZFSDeploymentTypeSingleAzHa2 = "SINGLE_AZ_HA_2" + // OpenZFSDeploymentTypeMultiAz1 is a OpenZFSDeploymentType enum value OpenZFSDeploymentTypeMultiAz1 = "MULTI_AZ_1" ) @@ -25826,6 +25910,8 @@ func OpenZFSDeploymentType_Values() []string { return []string{ OpenZFSDeploymentTypeSingleAz1, OpenZFSDeploymentTypeSingleAz2, + OpenZFSDeploymentTypeSingleAzHa1, + OpenZFSDeploymentTypeSingleAzHa2, OpenZFSDeploymentTypeMultiAz1, } } @@ -26106,6 +26192,9 @@ const ( // StatusUpdatedOptimizing is a Status enum value StatusUpdatedOptimizing = "UPDATED_OPTIMIZING" + + // StatusOptimizing is a Status enum value + StatusOptimizing = "OPTIMIZING" ) // Status_Values returns all elements of the Status enum @@ -26116,6 +26205,7 @@ func Status_Values() []string { StatusPending, StatusCompleted, StatusUpdatedOptimizing, + StatusOptimizing, } } diff --git a/service/opensearchservice/api.go b/service/opensearchservice/api.go index 6d1f518410c..1ef7db0895c 100644 --- a/service/opensearchservice/api.go +++ b/service/opensearchservice/api.go @@ -6653,6 +6653,114 @@ func (c *OpenSearchService) UpgradeDomainWithContext(ctx aws.Context, input *Upg return out, req.Send() } +// Container for parameters required to enable all machine learning features. +type AIMLOptionsInput_ struct { + _ struct{} `type:"structure"` + + // Container for parameters required for natural language query generation on + // the specified domain. + NaturalLanguageQueryGenerationOptions *NaturalLanguageQueryGenerationOptionsInput_ `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
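+//
+// For illustration only, a minimal sketch of how these options might be built,
+// attached to an UpdateDomainConfigInput, and printed with this String
+// representation (the DomainName field and the aws.String helper are assumed
+// from the existing SDK surface; they are not part of this diff):
+//
+//	opts := &opensearchservice.AIMLOptionsInput_{}
+//	opts.SetNaturalLanguageQueryGenerationOptions(
+//		(&opensearchservice.NaturalLanguageQueryGenerationOptionsInput_{}).
+//			SetDesiredState(opensearchservice.NaturalLanguageQueryGenerationDesiredStateEnabled))
+//	input := (&opensearchservice.UpdateDomainConfigInput{}).SetAIMLOptions(opts)
+//	input.DomainName = aws.String("my-domain") // assumed required field
+//	fmt.Println(opts)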
+func (s AIMLOptionsInput_) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AIMLOptionsInput_) GoString() string { + return s.String() +} + +// SetNaturalLanguageQueryGenerationOptions sets the NaturalLanguageQueryGenerationOptions field's value. +func (s *AIMLOptionsInput_) SetNaturalLanguageQueryGenerationOptions(v *NaturalLanguageQueryGenerationOptionsInput_) *AIMLOptionsInput_ { + s.NaturalLanguageQueryGenerationOptions = v + return s +} + +// Container for parameters representing the state of machine learning features +// on the specified domain. +type AIMLOptionsOutput_ struct { + _ struct{} `type:"structure"` + + // Container for parameters required for natural language query generation on + // the specified domain. + NaturalLanguageQueryGenerationOptions *NaturalLanguageQueryGenerationOptionsOutput_ `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AIMLOptionsOutput_) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AIMLOptionsOutput_) GoString() string { + return s.String() +} + +// SetNaturalLanguageQueryGenerationOptions sets the NaturalLanguageQueryGenerationOptions field's value. +func (s *AIMLOptionsOutput_) SetNaturalLanguageQueryGenerationOptions(v *NaturalLanguageQueryGenerationOptionsOutput_) *AIMLOptionsOutput_ { + s.NaturalLanguageQueryGenerationOptions = v + return s +} + +// The status of machine learning options on the specified domain. +type AIMLOptionsStatus struct { + _ struct{} `type:"structure"` + + // Machine learning options on the specified domain. + Options *AIMLOptionsOutput_ `type:"structure"` + + // Provides the current status of an entity. + Status *OptionStatus `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AIMLOptionsStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AIMLOptionsStatus) GoString() string { + return s.String() +} + +// SetOptions sets the Options field's value. +func (s *AIMLOptionsStatus) SetOptions(v *AIMLOptionsOutput_) *AIMLOptionsStatus { + s.Options = v + return s +} + +// SetStatus sets the Status field's value. +func (s *AIMLOptionsStatus) SetStatus(v *OptionStatus) *AIMLOptionsStatus { + s.Status = v + return s +} + // Information about an Amazon OpenSearch Service domain. 
type AWSDomainInformation struct { _ struct{} `type:"structure"` @@ -9404,6 +9512,9 @@ func (s *ConnectionProperties) SetEndpoint(v string) *ConnectionProperties { type CreateDomainInput struct { _ struct{} `type:"structure"` + // Options for all machine learning features for the specified domain. + AIMLOptions *AIMLOptionsInput_ `type:"structure"` + // Identity and Access Management (IAM) policy document specifying the access // policies for the new domain. AccessPolicies *string `type:"string"` @@ -9599,6 +9710,12 @@ func (s *CreateDomainInput) Validate() error { return nil } +// SetAIMLOptions sets the AIMLOptions field's value. +func (s *CreateDomainInput) SetAIMLOptions(v *AIMLOptionsInput_) *CreateDomainInput { + s.AIMLOptions = v + return s +} + // SetAccessPolicies sets the AccessPolicies field's value. func (s *CreateDomainInput) SetAccessPolicies(v string) *CreateDomainInput { s.AccessPolicies = &v @@ -12775,6 +12892,9 @@ func (s *DissociatePackageOutput) SetDomainPackageDetails(v *DomainPackageDetail type DomainConfig struct { _ struct{} `type:"structure"` + // Container for parameters required to enable all machine learning features. + AIMLOptions *AIMLOptionsStatus `type:"structure"` + // Specifies the access policies for the domain. AccessPolicies *AccessPoliciesStatus `type:"structure"` @@ -12859,6 +12979,12 @@ func (s DomainConfig) GoString() string { return s.String() } +// SetAIMLOptions sets the AIMLOptions field's value. +func (s *DomainConfig) SetAIMLOptions(v *AIMLOptionsStatus) *DomainConfig { + s.AIMLOptions = v + return s +} + // SetAccessPolicies sets the AccessPolicies field's value. func (s *DomainConfig) SetAccessPolicies(v *AccessPoliciesStatus) *DomainConfig { s.AccessPolicies = v @@ -13503,6 +13629,9 @@ func (s *DomainPackageDetails) SetReferencePath(v string) *DomainPackageDetails type DomainStatus struct { _ struct{} `type:"structure"` + // Container for parameters required to enable all machine learning features. + AIMLOptions *AIMLOptionsOutput_ `type:"structure"` + // The Amazon Resource Name (ARN) of the domain. For more information, see IAM // identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) // in the AWS Identity and Access Management User Guide. @@ -13650,6 +13779,12 @@ func (s DomainStatus) GoString() string { return s.String() } +// SetAIMLOptions sets the AIMLOptions field's value. +func (s *DomainStatus) SetAIMLOptions(v *AIMLOptionsOutput_) *DomainStatus { + s.AIMLOptions = v + return s +} + // SetARN sets the ARN field's value. func (s *DomainStatus) SetARN(v string) *DomainStatus { s.ARN = &v @@ -17336,6 +17471,84 @@ func (s *ModifyingProperties) SetValueType(v string) *ModifyingProperties { return s } +// Container for parameters required to enable the natural language query generation +// feature. +type NaturalLanguageQueryGenerationOptionsInput_ struct { + _ struct{} `type:"structure"` + + // The desired state of the natural language query generation feature. Valid + // values are ENABLED and DISABLED. + DesiredState *string `type:"string" enum:"NaturalLanguageQueryGenerationDesiredState"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NaturalLanguageQueryGenerationOptionsInput_) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NaturalLanguageQueryGenerationOptionsInput_) GoString() string { + return s.String() +} + +// SetDesiredState sets the DesiredState field's value. +func (s *NaturalLanguageQueryGenerationOptionsInput_) SetDesiredState(v string) *NaturalLanguageQueryGenerationOptionsInput_ { + s.DesiredState = &v + return s +} + +// Container for parameters representing the state of the natural language query +// generation feature on the specified domain. +type NaturalLanguageQueryGenerationOptionsOutput_ struct { + _ struct{} `type:"structure"` + + // The current state of the natural language query generation feature, indicating + // completion, in progress, or failure. + CurrentState *string `type:"string" enum:"NaturalLanguageQueryGenerationCurrentState"` + + // The desired state of the natural language query generation feature. Valid + // values are ENABLED and DISABLED. + DesiredState *string `type:"string" enum:"NaturalLanguageQueryGenerationDesiredState"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NaturalLanguageQueryGenerationOptionsOutput_) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NaturalLanguageQueryGenerationOptionsOutput_) GoString() string { + return s.String() +} + +// SetCurrentState sets the CurrentState field's value. +func (s *NaturalLanguageQueryGenerationOptionsOutput_) SetCurrentState(v string) *NaturalLanguageQueryGenerationOptionsOutput_ { + s.CurrentState = &v + return s +} + +// SetDesiredState sets the DesiredState field's value. +func (s *NaturalLanguageQueryGenerationOptionsOutput_) SetDesiredState(v string) *NaturalLanguageQueryGenerationOptionsOutput_ { + s.DesiredState = &v + return s +} + // Enables or disables node-to-node encryption. For more information, see Node-to-node // encryption for Amazon OpenSearch Service (https://docs.aws.amazon.com/opensearch-service/latest/developerguide/ntn.html). type NodeToNodeEncryptionOptions struct { @@ -20245,6 +20458,9 @@ func (s *UpdateDataSourceOutput) SetMessage(v string) *UpdateDataSourceOutput { type UpdateDomainConfigInput struct { _ struct{} `type:"structure"` + // Options for all machine learning features for the specified domain. + AIMLOptions *AIMLOptionsInput_ `type:"structure"` + // Identity and Access Management (IAM) access policy as a JSON-formatted string. AccessPolicies *string `type:"string"` @@ -20419,6 +20635,12 @@ func (s *UpdateDomainConfigInput) Validate() error { return nil } +// SetAIMLOptions sets the AIMLOptions field's value. +func (s *UpdateDomainConfigInput) SetAIMLOptions(v *AIMLOptionsInput_) *UpdateDomainConfigInput { + s.AIMLOptions = v + return s +} + // SetAccessPolicies sets the AccessPolicies field's value. 
func (s *UpdateDomainConfigInput) SetAccessPolicies(v string) *UpdateDomainConfigInput { s.AccessPolicies = &v @@ -22448,6 +22670,58 @@ func MasterNodeStatus_Values() []string { } } +const ( + // NaturalLanguageQueryGenerationCurrentStateNotEnabled is a NaturalLanguageQueryGenerationCurrentState enum value + NaturalLanguageQueryGenerationCurrentStateNotEnabled = "NOT_ENABLED" + + // NaturalLanguageQueryGenerationCurrentStateEnableComplete is a NaturalLanguageQueryGenerationCurrentState enum value + NaturalLanguageQueryGenerationCurrentStateEnableComplete = "ENABLE_COMPLETE" + + // NaturalLanguageQueryGenerationCurrentStateEnableInProgress is a NaturalLanguageQueryGenerationCurrentState enum value + NaturalLanguageQueryGenerationCurrentStateEnableInProgress = "ENABLE_IN_PROGRESS" + + // NaturalLanguageQueryGenerationCurrentStateEnableFailed is a NaturalLanguageQueryGenerationCurrentState enum value + NaturalLanguageQueryGenerationCurrentStateEnableFailed = "ENABLE_FAILED" + + // NaturalLanguageQueryGenerationCurrentStateDisableComplete is a NaturalLanguageQueryGenerationCurrentState enum value + NaturalLanguageQueryGenerationCurrentStateDisableComplete = "DISABLE_COMPLETE" + + // NaturalLanguageQueryGenerationCurrentStateDisableInProgress is a NaturalLanguageQueryGenerationCurrentState enum value + NaturalLanguageQueryGenerationCurrentStateDisableInProgress = "DISABLE_IN_PROGRESS" + + // NaturalLanguageQueryGenerationCurrentStateDisableFailed is a NaturalLanguageQueryGenerationCurrentState enum value + NaturalLanguageQueryGenerationCurrentStateDisableFailed = "DISABLE_FAILED" +) + +// NaturalLanguageQueryGenerationCurrentState_Values returns all elements of the NaturalLanguageQueryGenerationCurrentState enum +func NaturalLanguageQueryGenerationCurrentState_Values() []string { + return []string{ + NaturalLanguageQueryGenerationCurrentStateNotEnabled, + NaturalLanguageQueryGenerationCurrentStateEnableComplete, + NaturalLanguageQueryGenerationCurrentStateEnableInProgress, + NaturalLanguageQueryGenerationCurrentStateEnableFailed, + NaturalLanguageQueryGenerationCurrentStateDisableComplete, + NaturalLanguageQueryGenerationCurrentStateDisableInProgress, + NaturalLanguageQueryGenerationCurrentStateDisableFailed, + } +} + +const ( + // NaturalLanguageQueryGenerationDesiredStateEnabled is a NaturalLanguageQueryGenerationDesiredState enum value + NaturalLanguageQueryGenerationDesiredStateEnabled = "ENABLED" + + // NaturalLanguageQueryGenerationDesiredStateDisabled is a NaturalLanguageQueryGenerationDesiredState enum value + NaturalLanguageQueryGenerationDesiredStateDisabled = "DISABLED" +) + +// NaturalLanguageQueryGenerationDesiredState_Values returns all elements of the NaturalLanguageQueryGenerationDesiredState enum +func NaturalLanguageQueryGenerationDesiredState_Values() []string { + return []string{ + NaturalLanguageQueryGenerationDesiredStateEnabled, + NaturalLanguageQueryGenerationDesiredStateDisabled, + } +} + const ( // NodeStatusActive is a NodeStatus enum value NodeStatusActive = "Active" diff --git a/service/sagemaker/api.go b/service/sagemaker/api.go index a423675bb81..00710bb6b0b 100644 --- a/service/sagemaker/api.go +++ b/service/sagemaker/api.go @@ -4388,6 +4388,97 @@ func (c *SageMaker) CreateNotebookInstanceLifecycleConfigWithContext(ctx aws.Con return out, req.Send() } +const opCreateOptimizationJob = "CreateOptimizationJob" + +// CreateOptimizationJobRequest generates a "aws/request.Request" representing the +// client's request for the CreateOptimizationJob operation. 
The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateOptimizationJob for more information on using the CreateOptimizationJob
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//	// Example sending a request using the CreateOptimizationJobRequest method.
+//	req, resp := client.CreateOptimizationJobRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateOptimizationJob
+func (c *SageMaker) CreateOptimizationJobRequest(input *CreateOptimizationJobInput) (req *request.Request, output *CreateOptimizationJobOutput) {
+	op := &request.Operation{
+		Name:       opCreateOptimizationJob,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &CreateOptimizationJobInput{}
+	}
+
+	output = &CreateOptimizationJobOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// CreateOptimizationJob API operation for Amazon SageMaker Service.
+//
+// Creates a job that optimizes a model for inference performance. To create
+// the job, you provide the location of a source model, and you provide the
+// settings for the optimization techniques that you want the job to apply.
+// When the job completes successfully, SageMaker uploads the new optimized
+// model to the output destination that you specify.
+//
+// For more information about how to use this action, and about the supported
+// optimization techniques, see Optimize model inference with Amazon SageMaker
+// (https://docs.aws.amazon.com/sagemaker/latest/dg/model-optimize.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon SageMaker Service's
+// API operation CreateOptimizationJob for usage and error information.
+//
+// Returned Error Types:
+//
+//   - ResourceInUse
+//     Resource being accessed is in use.
+//
+//   - ResourceLimitExceeded
+//     You have exceeded a SageMaker resource limit. For example, you might have
+//     too many training jobs created.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateOptimizationJob
+func (c *SageMaker) CreateOptimizationJob(input *CreateOptimizationJobInput) (*CreateOptimizationJobOutput, error) {
+	req, out := c.CreateOptimizationJobRequest(input)
+	return out, req.Send()
+}
+
+// CreateOptimizationJobWithContext is the same as CreateOptimizationJob with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateOptimizationJob for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
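+//
+// For illustration only, a minimal sketch of calling this operation with a
+// deadline and checking one of the documented error codes ("params" is assumed
+// to be a fully populated *CreateOptimizationJobInput; see
+// CreateOptimizationJobInput elsewhere in this file):
+//
+//	ctx, cancel := context.WithTimeout(aws.BackgroundContext(), time.Minute)
+//	defer cancel()
+//	out, err := svc.CreateOptimizationJobWithContext(ctx, params)
+//	if err != nil {
+//		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == sagemaker.ErrCodeResourceLimitExceeded {
+//			// too many jobs; back off or clean up before retrying
+//		}
+//		return err
+//	}
+//	fmt.Println(out)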
+func (c *SageMaker) CreateOptimizationJobWithContext(ctx aws.Context, input *CreateOptimizationJobInput, opts ...request.Option) (*CreateOptimizationJobOutput, error) {
+	req, out := c.CreateOptimizationJobRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
 const opCreatePipeline = "CreatePipeline"
 
 // CreatePipelineRequest generates a "aws/request.Request" representing the
@@ -9083,6 +9174,85 @@ func (c *SageMaker) DeleteNotebookInstanceLifecycleConfigWithContext(ctx aws.Con
 	return out, req.Send()
 }
 
+const opDeleteOptimizationJob = "DeleteOptimizationJob"
+
+// DeleteOptimizationJobRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteOptimizationJob operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteOptimizationJob for more information on using the DeleteOptimizationJob
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//	// Example sending a request using the DeleteOptimizationJobRequest method.
+//	req, resp := client.DeleteOptimizationJobRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteOptimizationJob
+func (c *SageMaker) DeleteOptimizationJobRequest(input *DeleteOptimizationJobInput) (req *request.Request, output *DeleteOptimizationJobOutput) {
+	op := &request.Operation{
+		Name:       opDeleteOptimizationJob,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteOptimizationJobInput{}
+	}
+
+	output = &DeleteOptimizationJobOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// DeleteOptimizationJob API operation for Amazon SageMaker Service.
+//
+// Deletes an optimization job.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon SageMaker Service's
+// API operation DeleteOptimizationJob for usage and error information.
+//
+// Returned Error Types:
+//   - ResourceNotFound
+//     Resource being accessed is not found.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteOptimizationJob
+func (c *SageMaker) DeleteOptimizationJob(input *DeleteOptimizationJobInput) (*DeleteOptimizationJobOutput, error) {
+	req, out := c.DeleteOptimizationJobRequest(input)
+	return out, req.Send()
+}
+
+// DeleteOptimizationJobWithContext is the same as DeleteOptimizationJob with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteOptimizationJob for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
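+//
+// For illustration only, a sketch of deleting a job by name and tolerating a
+// missing resource (the OptimizationJobName input field is assumed from the
+// service API; it is not shown in this diff):
+//
+//	_, err := svc.DeleteOptimizationJobWithContext(ctx, &sagemaker.DeleteOptimizationJobInput{
+//		OptimizationJobName: aws.String("my-optimization-job"),
+//	})
+//	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == sagemaker.ErrCodeResourceNotFound {
+//		err = nil // already gone; treat as success
+//	}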
+func (c *SageMaker) DeleteOptimizationJobWithContext(ctx aws.Context, input *DeleteOptimizationJobInput, opts ...request.Option) (*DeleteOptimizationJobOutput, error) {
+	req, out := c.DeleteOptimizationJobRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
 const opDeletePipeline = "DeletePipeline"
 
 // DeletePipelineRequest generates a "aws/request.Request" representing the
@@ -13634,6 +13804,84 @@ func (c *SageMaker) DescribeNotebookInstanceLifecycleConfigWithContext(ctx aws.C
 	return out, req.Send()
 }
 
+const opDescribeOptimizationJob = "DescribeOptimizationJob"
+
+// DescribeOptimizationJobRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeOptimizationJob operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeOptimizationJob for more information on using the DescribeOptimizationJob
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//	// Example sending a request using the DescribeOptimizationJobRequest method.
+//	req, resp := client.DescribeOptimizationJobRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeOptimizationJob
+func (c *SageMaker) DescribeOptimizationJobRequest(input *DescribeOptimizationJobInput) (req *request.Request, output *DescribeOptimizationJobOutput) {
+	op := &request.Operation{
+		Name:       opDescribeOptimizationJob,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DescribeOptimizationJobInput{}
+	}
+
+	output = &DescribeOptimizationJobOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DescribeOptimizationJob API operation for Amazon SageMaker Service.
+//
+// Provides the properties of the specified optimization job.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon SageMaker Service's
+// API operation DescribeOptimizationJob for usage and error information.
+//
+// Returned Error Types:
+//   - ResourceNotFound
+//     Resource being accessed is not found.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DescribeOptimizationJob
+func (c *SageMaker) DescribeOptimizationJob(input *DescribeOptimizationJobInput) (*DescribeOptimizationJobOutput, error) {
+	req, out := c.DescribeOptimizationJobRequest(input)
+	return out, req.Send()
+}
+
+// DescribeOptimizationJobWithContext is the same as DescribeOptimizationJob with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeOptimizationJob for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
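+//
+// For illustration only, a sketch of reading back a job's properties (the
+// OptimizationJobName and OptimizationJobStatus fields are assumed from the
+// service API; they are not shown in this diff):
+//
+//	out, err := svc.DescribeOptimizationJobWithContext(ctx, &sagemaker.DescribeOptimizationJobInput{
+//		OptimizationJobName: aws.String("my-optimization-job"),
+//	})
+//	if err == nil {
+//		fmt.Println(aws.StringValue(out.OptimizationJobStatus))
+//	}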
+func (c *SageMaker) DescribeOptimizationJobWithContext(ctx aws.Context, input *DescribeOptimizationJobInput, opts ...request.Option) (*DescribeOptimizationJobOutput, error) { + req, out := c.DescribeOptimizationJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribePipeline = "DescribePipeline" // DescribePipelineRequest generates a "aws/request.Request" representing the @@ -22788,6 +23036,136 @@ func (c *SageMaker) ListNotebookInstancesPagesWithContext(ctx aws.Context, input return p.Err() } +const opListOptimizationJobs = "ListOptimizationJobs" + +// ListOptimizationJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListOptimizationJobs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListOptimizationJobs for more information on using the ListOptimizationJobs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListOptimizationJobsRequest method. +// req, resp := client.ListOptimizationJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListOptimizationJobs +func (c *SageMaker) ListOptimizationJobsRequest(input *ListOptimizationJobsInput) (req *request.Request, output *ListOptimizationJobsOutput) { + op := &request.Operation{ + Name: opListOptimizationJobs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListOptimizationJobsInput{} + } + + output = &ListOptimizationJobsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListOptimizationJobs API operation for Amazon SageMaker Service. +// +// Lists the optimization jobs in your account and their properties. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation ListOptimizationJobs for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListOptimizationJobs +func (c *SageMaker) ListOptimizationJobs(input *ListOptimizationJobsInput) (*ListOptimizationJobsOutput, error) { + req, out := c.ListOptimizationJobsRequest(input) + return out, req.Send() +} + +// ListOptimizationJobsWithContext is the same as ListOptimizationJobs with the addition of +// the ability to pass a context and additional request options. +// +// See ListOptimizationJobs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
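+//
+// For illustration only, a sketch of paging manually with the NextToken and
+// MaxResults fields named in this operation's Paginator definition (most
+// callers can use ListOptimizationJobsPages below instead):
+//
+//	input := &sagemaker.ListOptimizationJobsInput{MaxResults: aws.Int64(10)}
+//	for {
+//		page, err := svc.ListOptimizationJobsWithContext(ctx, input)
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Println(page)
+//		if page.NextToken == nil {
+//			break
+//		}
+//		input.NextToken = page.NextToken
+//	}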
+func (c *SageMaker) ListOptimizationJobsWithContext(ctx aws.Context, input *ListOptimizationJobsInput, opts ...request.Option) (*ListOptimizationJobsOutput, error) { + req, out := c.ListOptimizationJobsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListOptimizationJobsPages iterates over the pages of a ListOptimizationJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListOptimizationJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListOptimizationJobs operation. +// pageNum := 0 +// err := client.ListOptimizationJobsPages(params, +// func(page *sagemaker.ListOptimizationJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *SageMaker) ListOptimizationJobsPages(input *ListOptimizationJobsInput, fn func(*ListOptimizationJobsOutput, bool) bool) error { + return c.ListOptimizationJobsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListOptimizationJobsPagesWithContext same as ListOptimizationJobsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) ListOptimizationJobsPagesWithContext(ctx aws.Context, input *ListOptimizationJobsInput, fn func(*ListOptimizationJobsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListOptimizationJobsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListOptimizationJobsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListOptimizationJobsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListPipelineExecutionSteps = "ListPipelineExecutionSteps" // ListPipelineExecutionStepsRequest generates a "aws/request.Request" representing the @@ -27628,6 +28006,85 @@ func (c *SageMaker) StopNotebookInstanceWithContext(ctx aws.Context, input *Stop return out, req.Send() } +const opStopOptimizationJob = "StopOptimizationJob" + +// StopOptimizationJobRequest generates a "aws/request.Request" representing the +// client's request for the StopOptimizationJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopOptimizationJob for more information on using the StopOptimizationJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StopOptimizationJobRequest method. 
+//	req, resp := client.StopOptimizationJobRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopOptimizationJob
+func (c *SageMaker) StopOptimizationJobRequest(input *StopOptimizationJobInput) (req *request.Request, output *StopOptimizationJobOutput) {
+	op := &request.Operation{
+		Name:       opStopOptimizationJob,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &StopOptimizationJobInput{}
+	}
+
+	output = &StopOptimizationJobOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// StopOptimizationJob API operation for Amazon SageMaker Service.
+//
+// Ends a running inference optimization job.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon SageMaker Service's
+// API operation StopOptimizationJob for usage and error information.
+//
+// Returned Error Types:
+//   - ResourceNotFound
+//     Resource being accessed is not found.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/StopOptimizationJob
+func (c *SageMaker) StopOptimizationJob(input *StopOptimizationJobInput) (*StopOptimizationJobOutput, error) {
+	req, out := c.StopOptimizationJobRequest(input)
+	return out, req.Send()
+}
+
+// StopOptimizationJobWithContext is the same as StopOptimizationJob with the addition of
+// the ability to pass a context and additional request options.
+//
+// See StopOptimizationJob for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SageMaker) StopOptimizationJobWithContext(ctx aws.Context, input *StopOptimizationJobInput, opts ...request.Option) (*StopOptimizationJobOutput, error) {
+	req, out := c.StopOptimizationJobRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
 const opStopPipelineExecution = "StopPipelineExecution"
 
 // StopPipelineExecutionRequest generates a "aws/request.Request" representing the
@@ -31695,6 +32152,76 @@ func (s *AdditionalInferenceSpecificationDefinition) SetSupportedTransformInstan
 	return s
 }
 
+// Data sources that are available to your model in addition to the one that
+// you specify for ModelDataSource when you use the CreateModel action.
+type AdditionalModelDataSource struct {
+	_ struct{} `type:"structure"`
+
+	// A custom name for this AdditionalModelDataSource object.
+	//
+	// ChannelName is a required field
+	ChannelName *string `min:"1" type:"string" required:"true"`
+
+	// Specifies the S3 location of ML model data to deploy.
+	//
+	// S3DataSource is a required field
+	S3DataSource *S3ModelDataSource `type:"structure" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
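+//
+// For illustration only, a sketch of populating this structure and printing it
+// with this String representation (the S3ModelDataSource field names are
+// assumed from the existing service API; they are not part of this diff):
+//
+//	source := &sagemaker.AdditionalModelDataSource{
+//		ChannelName: aws.String("adapter-weights"),
+//		S3DataSource: &sagemaker.S3ModelDataSource{
+//			S3Uri:           aws.String("s3://amzn-s3-demo-bucket/model/"),
+//			S3DataType:      aws.String("S3Prefix"),
+//			CompressionType: aws.String("None"),
+//		},
+//	}
+//	fmt.Println(source)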
+func (s AdditionalModelDataSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AdditionalModelDataSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AdditionalModelDataSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AdditionalModelDataSource"} + if s.ChannelName == nil { + invalidParams.Add(request.NewErrParamRequired("ChannelName")) + } + if s.ChannelName != nil && len(*s.ChannelName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChannelName", 1)) + } + if s.S3DataSource == nil { + invalidParams.Add(request.NewErrParamRequired("S3DataSource")) + } + if s.S3DataSource != nil { + if err := s.S3DataSource.Validate(); err != nil { + invalidParams.AddNested("S3DataSource", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChannelName sets the ChannelName field's value. +func (s *AdditionalModelDataSource) SetChannelName(v string) *AdditionalModelDataSource { + s.ChannelName = &v + return s +} + +// SetS3DataSource sets the S3DataSource field's value. +func (s *AdditionalModelDataSource) SetS3DataSource(v *S3ModelDataSource) *AdditionalModelDataSource { + s.S3DataSource = v + return s +} + // A data source used for training or inference that is in addition to the input // dataset or model data. type AdditionalS3DataSource struct { @@ -32418,6 +32945,48 @@ func (s *AlgorithmValidationSpecification) SetValidationRole(v string) *Algorith return s } +// A collection of settings that configure the Amazon Q experience within the +// domain. +type AmazonQSettings struct { + _ struct{} `type:"structure"` + + // The ARN of the Amazon Q profile used within the domain. + QProfileArn *string `type:"string"` + + // Whether Amazon Q has been enabled within the domain. + Status *string `type:"string" enum:"FeatureStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AmazonQSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AmazonQSettings) GoString() string { + return s.String() +} + +// SetQProfileArn sets the QProfileArn field's value. +func (s *AmazonQSettings) SetQProfileArn(v string) *AmazonQSettings { + s.QProfileArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AmazonQSettings) SetStatus(v string) *AmazonQSettings { + s.Status = &v + return s +} + // Configures how labels are consolidated across human workers and processes // output data. 
type AnnotationConsolidationConfig struct { @@ -40019,6 +40588,10 @@ func (s *ContainerConfig) SetContainerEnvironmentVariables(v map[string]*string) type ContainerDefinition struct { _ struct{} `type:"structure"` + // Data sources that are available to your model in addition to the one that + // you specify for ModelDataSource when you use the CreateModel action. + AdditionalModelDataSources []*AdditionalModelDataSource `type:"list"` + // This parameter is ignored for models that contain only a PrimaryContainer. // // When a ContainerDefinition is part of an inference pipeline, the value of @@ -40134,6 +40707,16 @@ func (s *ContainerDefinition) Validate() error { if s.ModelPackageName != nil && len(*s.ModelPackageName) < 1 { invalidParams.Add(request.NewErrParamMinLen("ModelPackageName", 1)) } + if s.AdditionalModelDataSources != nil { + for i, v := range s.AdditionalModelDataSources { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalModelDataSources", i), err.(request.ErrInvalidParams)) + } + } + } if s.ImageConfig != nil { if err := s.ImageConfig.Validate(); err != nil { invalidParams.AddNested("ImageConfig", err.(request.ErrInvalidParams)) @@ -40151,6 +40734,12 @@ func (s *ContainerDefinition) Validate() error { return nil } +// SetAdditionalModelDataSources sets the AdditionalModelDataSources field's value. +func (s *ContainerDefinition) SetAdditionalModelDataSources(v []*AdditionalModelDataSource) *ContainerDefinition { + s.AdditionalModelDataSources = v + return s +} + // SetContainerHostname sets the ContainerHostname field's value. func (s *ContainerDefinition) SetContainerHostname(v string) *ContainerDefinition { s.ContainerHostname = &v @@ -49074,6 +49663,267 @@ func (s *CreateNotebookInstanceOutput) SetNotebookInstanceArn(v string) *CreateN return s } +type CreateOptimizationJobInput struct { + _ struct{} `type:"structure"` + + // The type of instance that hosts the optimized model that you create with + // the optimization job. + // + // DeploymentInstanceType is a required field + DeploymentInstanceType *string `type:"string" required:"true" enum:"OptimizationJobDeploymentInstanceType"` + + // The location of the source model to optimize with an optimization job. + // + // ModelSource is a required field + ModelSource *OptimizationJobModelSource `type:"structure" required:"true"` + + // Settings for each of the optimization techniques that the job applies. + // + // OptimizationConfigs is a required field + OptimizationConfigs []*OptimizationConfig `type:"list" required:"true"` + + // The environment variables to set in the model container. + OptimizationEnvironment map[string]*string `type:"map"` + + // A custom name for the new optimization job. + // + // OptimizationJobName is a required field + OptimizationJobName *string `min:"1" type:"string" required:"true"` + + // Details for where to store the optimized model that you create with the optimization + // job. + // + // OutputConfig is a required field + OutputConfig *OptimizationJobOutputConfig `type:"structure" required:"true"` + + // The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker + // to perform tasks on your behalf. 
+	//
+	// During model optimization, Amazon SageMaker needs your permission to:
+	//
+	//    * Read input data from an S3 bucket
+	//
+	//    * Write model artifacts to an S3 bucket
+	//
+	//    * Write logs to Amazon CloudWatch Logs
+	//
+	//    * Publish metrics to Amazon CloudWatch
+	//
+	// You grant permissions for all of these tasks to an IAM role. To pass this
+	// role to Amazon SageMaker, the caller of this API must have the iam:PassRole
+	// permission. For more information, see Amazon SageMaker Roles (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html).
+	//
+	// RoleArn is a required field
+	RoleArn *string `min:"20" type:"string" required:"true"`
+
+	// Specifies a limit to how long a job can run. When the job reaches the time
+	// limit, SageMaker ends the job. Use this API to cap costs.
+	//
+	// To stop a training job, SageMaker sends the algorithm the SIGTERM signal,
+	// which delays job termination for 120 seconds. Algorithms can use this 120-second
+	// window to save the model artifacts, so the results of training are not lost.
+	//
+	// The training algorithms provided by SageMaker automatically save the intermediate
+	// results of a model training job when possible. Saving artifacts is a best-effort
+	// attempt; the model might not be in a state from which it can be saved. For
+	// example, if training has just started, the model might not be ready to save.
+	// When saved, this intermediate data is a valid model artifact. You can use
+	// it to create a model with CreateModel.
+	//
+	// The Neural Topic Model (NTM) currently does not support saving intermediate
+	// model artifacts. When training NTMs, make sure that the maximum runtime is
+	// sufficient for the training job to complete.
+	//
+	// StoppingCondition is a required field
+	StoppingCondition *StoppingCondition `type:"structure" required:"true"`
+
+	// A list of key-value pairs associated with the optimization job. For more
+	// information, see Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)
+	// in the Amazon Web Services General Reference Guide.
+	Tags []*Tag `type:"list"`
+
+	// A VPC in Amazon VPC that your optimized model has access to.
+	VpcConfig *OptimizationVpcConfig `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateOptimizationJobInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateOptimizationJobInput) GoString() string {
+	return s.String()
+}
+
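A minimal usage sketch, not part of the generated diff: it shows how the fields above compose into a CreateOptimizationJob request. The job name, role ARN, bucket URIs, and the OPTION_QUANTIZE container option are hypothetical placeholders.

	package main

	import (
		"fmt"
		"log"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/sagemaker"
	)

	func main() {
		svc := sagemaker.New(session.Must(session.NewSession()))

		input := (&sagemaker.CreateOptimizationJobInput{}).
			SetOptimizationJobName("llama3-8b-awq").                                // hypothetical name
			SetDeploymentInstanceType(sagemaker.OptimizationJobDeploymentInstanceTypeMlG52xlarge).
			SetRoleArn("arn:aws:iam::111122223333:role/SageMakerOptimizationRole"). // hypothetical role
			SetModelSource((&sagemaker.OptimizationJobModelSource{}).
				SetS3((&sagemaker.OptimizationJobModelSourceS3{}).
					SetS3Uri("s3://amzn-s3-demo-bucket/source-model/"))). // hypothetical bucket
			SetOptimizationConfigs([]*sagemaker.OptimizationConfig{
				(&sagemaker.OptimizationConfig{}).SetModelQuantizationConfig(
					(&sagemaker.ModelQuantizationConfig{}).SetOverrideEnvironment(
						map[string]*string{"OPTION_QUANTIZE": aws.String("awq")})), // assumed container option
			}).
			SetOutputConfig((&sagemaker.OptimizationJobOutputConfig{}).
				SetS3OutputLocation("s3://amzn-s3-demo-bucket/optimized/")). // hypothetical bucket
			SetStoppingCondition((&sagemaker.StoppingCondition{}).
				SetMaxRuntimeInSeconds(3600))

		// Run the same client-side checks as the Validate method below
		// before the request goes on the wire.
		if err := input.Validate(); err != nil {
			log.Fatal(err)
		}
		out, err := svc.CreateOptimizationJob(input)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(aws.StringValue(out.OptimizationJobArn))
	}

+// Validate inspects the fields of the type to determine if they are valid.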
+func (s *CreateOptimizationJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateOptimizationJobInput"} + if s.DeploymentInstanceType == nil { + invalidParams.Add(request.NewErrParamRequired("DeploymentInstanceType")) + } + if s.ModelSource == nil { + invalidParams.Add(request.NewErrParamRequired("ModelSource")) + } + if s.OptimizationConfigs == nil { + invalidParams.Add(request.NewErrParamRequired("OptimizationConfigs")) + } + if s.OptimizationJobName == nil { + invalidParams.Add(request.NewErrParamRequired("OptimizationJobName")) + } + if s.OptimizationJobName != nil && len(*s.OptimizationJobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OptimizationJobName", 1)) + } + if s.OutputConfig == nil { + invalidParams.Add(request.NewErrParamRequired("OutputConfig")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.StoppingCondition == nil { + invalidParams.Add(request.NewErrParamRequired("StoppingCondition")) + } + if s.ModelSource != nil { + if err := s.ModelSource.Validate(); err != nil { + invalidParams.AddNested("ModelSource", err.(request.ErrInvalidParams)) + } + } + if s.OutputConfig != nil { + if err := s.OutputConfig.Validate(); err != nil { + invalidParams.AddNested("OutputConfig", err.(request.ErrInvalidParams)) + } + } + if s.StoppingCondition != nil { + if err := s.StoppingCondition.Validate(); err != nil { + invalidParams.AddNested("StoppingCondition", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + if s.VpcConfig != nil { + if err := s.VpcConfig.Validate(); err != nil { + invalidParams.AddNested("VpcConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeploymentInstanceType sets the DeploymentInstanceType field's value. +func (s *CreateOptimizationJobInput) SetDeploymentInstanceType(v string) *CreateOptimizationJobInput { + s.DeploymentInstanceType = &v + return s +} + +// SetModelSource sets the ModelSource field's value. +func (s *CreateOptimizationJobInput) SetModelSource(v *OptimizationJobModelSource) *CreateOptimizationJobInput { + s.ModelSource = v + return s +} + +// SetOptimizationConfigs sets the OptimizationConfigs field's value. +func (s *CreateOptimizationJobInput) SetOptimizationConfigs(v []*OptimizationConfig) *CreateOptimizationJobInput { + s.OptimizationConfigs = v + return s +} + +// SetOptimizationEnvironment sets the OptimizationEnvironment field's value. +func (s *CreateOptimizationJobInput) SetOptimizationEnvironment(v map[string]*string) *CreateOptimizationJobInput { + s.OptimizationEnvironment = v + return s +} + +// SetOptimizationJobName sets the OptimizationJobName field's value. +func (s *CreateOptimizationJobInput) SetOptimizationJobName(v string) *CreateOptimizationJobInput { + s.OptimizationJobName = &v + return s +} + +// SetOutputConfig sets the OutputConfig field's value. +func (s *CreateOptimizationJobInput) SetOutputConfig(v *OptimizationJobOutputConfig) *CreateOptimizationJobInput { + s.OutputConfig = v + return s +} + +// SetRoleArn sets the RoleArn field's value. 
+func (s *CreateOptimizationJobInput) SetRoleArn(v string) *CreateOptimizationJobInput { + s.RoleArn = &v + return s +} + +// SetStoppingCondition sets the StoppingCondition field's value. +func (s *CreateOptimizationJobInput) SetStoppingCondition(v *StoppingCondition) *CreateOptimizationJobInput { + s.StoppingCondition = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateOptimizationJobInput) SetTags(v []*Tag) *CreateOptimizationJobInput { + s.Tags = v + return s +} + +// SetVpcConfig sets the VpcConfig field's value. +func (s *CreateOptimizationJobInput) SetVpcConfig(v *OptimizationVpcConfig) *CreateOptimizationJobInput { + s.VpcConfig = v + return s +} + +type CreateOptimizationJobOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the optimization job. + // + // OptimizationJobArn is a required field + OptimizationJobArn *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateOptimizationJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateOptimizationJobOutput) GoString() string { + return s.String() +} + +// SetOptimizationJobArn sets the OptimizationJobArn field's value. +func (s *CreateOptimizationJobOutput) SetOptimizationJobArn(v string) *CreateOptimizationJobOutput { + s.OptimizationJobArn = &v + return s +} + type CreatePipelineInput struct { _ struct{} `type:"structure"` @@ -56755,6 +57605,77 @@ func (s DeleteNotebookInstanceOutput) GoString() string { return s.String() } +type DeleteOptimizationJobInput struct { + _ struct{} `type:"structure"` + + // The name that you assigned to the optimization job. + // + // OptimizationJobName is a required field + OptimizationJobName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteOptimizationJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteOptimizationJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteOptimizationJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteOptimizationJobInput"} + if s.OptimizationJobName == nil { + invalidParams.Add(request.NewErrParamRequired("OptimizationJobName")) + } + if s.OptimizationJobName != nil && len(*s.OptimizationJobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OptimizationJobName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOptimizationJobName sets the OptimizationJobName field's value. +func (s *DeleteOptimizationJobInput) SetOptimizationJobName(v string) *DeleteOptimizationJobInput { + s.OptimizationJobName = &v + return s +} + +type DeleteOptimizationJobOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteOptimizationJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteOptimizationJobOutput) GoString() string { + return s.String() +} + type DeletePipelineInput struct { _ struct{} `type:"structure"` @@ -67644,6 +68565,270 @@ func (s *DescribeNotebookInstanceOutput) SetVolumeSizeInGB(v int64) *DescribeNot return s } +type DescribeOptimizationJobInput struct { + _ struct{} `type:"structure"` + + // The name that you assigned to the optimization job. + // + // OptimizationJobName is a required field + OptimizationJobName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeOptimizationJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeOptimizationJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeOptimizationJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeOptimizationJobInput"} + if s.OptimizationJobName == nil { + invalidParams.Add(request.NewErrParamRequired("OptimizationJobName")) + } + if s.OptimizationJobName != nil && len(*s.OptimizationJobName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OptimizationJobName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOptimizationJobName sets the OptimizationJobName field's value. +func (s *DescribeOptimizationJobInput) SetOptimizationJobName(v string) *DescribeOptimizationJobInput { + s.OptimizationJobName = &v + return s +} + +type DescribeOptimizationJobOutput struct { + _ struct{} `type:"structure"` + + // The time when you created the optimization job. 
+	//
+	// CreationTime is a required field
+	CreationTime *time.Time `type:"timestamp" required:"true"`
+
+	// The type of instance that hosts the optimized model that you create with
+	// the optimization job.
+	//
+	// DeploymentInstanceType is a required field
+	DeploymentInstanceType *string `type:"string" required:"true" enum:"OptimizationJobDeploymentInstanceType"`
+
+	// If the optimization job status is FAILED, the reason for the failure.
+	FailureReason *string `type:"string"`
+
+	// The time when the optimization job was last updated.
+	//
+	// LastModifiedTime is a required field
+	LastModifiedTime *time.Time `type:"timestamp" required:"true"`
+
+	// The location of the source model to optimize with an optimization job.
+	//
+	// ModelSource is a required field
+	ModelSource *OptimizationJobModelSource `type:"structure" required:"true"`
+
+	// Settings for each of the optimization techniques that the job applies.
+	//
+	// OptimizationConfigs is a required field
+	OptimizationConfigs []*OptimizationConfig `type:"list" required:"true"`
+
+	// The time when the optimization job finished processing.
+	OptimizationEndTime *time.Time `type:"timestamp"`
+
+	// The environment variables to set in the model container.
+	OptimizationEnvironment map[string]*string `type:"map"`
+
+	// The Amazon Resource Name (ARN) of the optimization job.
+	//
+	// OptimizationJobArn is a required field
+	OptimizationJobArn *string `type:"string" required:"true"`
+
+	// The name that you assigned to the optimization job.
+	//
+	// OptimizationJobName is a required field
+	OptimizationJobName *string `min:"1" type:"string" required:"true"`
+
+	// The current status of the optimization job.
+	//
+	// OptimizationJobStatus is a required field
+	OptimizationJobStatus *string `type:"string" required:"true" enum:"OptimizationJobStatus"`
+
+	// Output values produced by an optimization job.
+	OptimizationOutput *OptimizationOutput_ `type:"structure"`
+
+	// The time when the optimization job started.
+	OptimizationStartTime *time.Time `type:"timestamp"`
+
+	// Details for where to store the optimized model that you create with the optimization
+	// job.
+	//
+	// OutputConfig is a required field
+	OutputConfig *OptimizationJobOutputConfig `type:"structure" required:"true"`
+
+	// The ARN of the IAM role that you assigned to the optimization job.
+	//
+	// RoleArn is a required field
+	RoleArn *string `min:"20" type:"string" required:"true"`
+
+	// Specifies a limit to how long a job can run. When the job reaches the time
+	// limit, SageMaker ends the job. Use this API to cap costs.
+	//
+	// To stop a training job, SageMaker sends the algorithm the SIGTERM signal,
+	// which delays job termination for 120 seconds. Algorithms can use this 120-second
+	// window to save the model artifacts, so the results of training are not lost.
+	//
+	// The training algorithms provided by SageMaker automatically save the intermediate
+	// results of a model training job when possible. Saving artifacts is a best-effort
+	// attempt; the model might not be in a state from which it can be saved. For
+	// example, if training has just started, the model might not be ready to save.
+	// When saved, this intermediate data is a valid model artifact. You can use
+	// it to create a model with CreateModel.
+	//
+	// The Neural Topic Model (NTM) currently does not support saving intermediate
+	// model artifacts. When training NTMs, make sure that the maximum runtime is
+	// sufficient for the training job to complete.
+ // + // StoppingCondition is a required field + StoppingCondition *StoppingCondition `type:"structure" required:"true"` + + // A VPC in Amazon VPC that your optimized model has access to. + VpcConfig *OptimizationVpcConfig `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeOptimizationJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeOptimizationJobOutput) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeOptimizationJobOutput) SetCreationTime(v time.Time) *DescribeOptimizationJobOutput { + s.CreationTime = &v + return s +} + +// SetDeploymentInstanceType sets the DeploymentInstanceType field's value. +func (s *DescribeOptimizationJobOutput) SetDeploymentInstanceType(v string) *DescribeOptimizationJobOutput { + s.DeploymentInstanceType = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *DescribeOptimizationJobOutput) SetFailureReason(v string) *DescribeOptimizationJobOutput { + s.FailureReason = &v + return s +} + +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *DescribeOptimizationJobOutput) SetLastModifiedTime(v time.Time) *DescribeOptimizationJobOutput { + s.LastModifiedTime = &v + return s +} + +// SetModelSource sets the ModelSource field's value. +func (s *DescribeOptimizationJobOutput) SetModelSource(v *OptimizationJobModelSource) *DescribeOptimizationJobOutput { + s.ModelSource = v + return s +} + +// SetOptimizationConfigs sets the OptimizationConfigs field's value. +func (s *DescribeOptimizationJobOutput) SetOptimizationConfigs(v []*OptimizationConfig) *DescribeOptimizationJobOutput { + s.OptimizationConfigs = v + return s +} + +// SetOptimizationEndTime sets the OptimizationEndTime field's value. +func (s *DescribeOptimizationJobOutput) SetOptimizationEndTime(v time.Time) *DescribeOptimizationJobOutput { + s.OptimizationEndTime = &v + return s +} + +// SetOptimizationEnvironment sets the OptimizationEnvironment field's value. +func (s *DescribeOptimizationJobOutput) SetOptimizationEnvironment(v map[string]*string) *DescribeOptimizationJobOutput { + s.OptimizationEnvironment = v + return s +} + +// SetOptimizationJobArn sets the OptimizationJobArn field's value. +func (s *DescribeOptimizationJobOutput) SetOptimizationJobArn(v string) *DescribeOptimizationJobOutput { + s.OptimizationJobArn = &v + return s +} + +// SetOptimizationJobName sets the OptimizationJobName field's value. +func (s *DescribeOptimizationJobOutput) SetOptimizationJobName(v string) *DescribeOptimizationJobOutput { + s.OptimizationJobName = &v + return s +} + +// SetOptimizationJobStatus sets the OptimizationJobStatus field's value. +func (s *DescribeOptimizationJobOutput) SetOptimizationJobStatus(v string) *DescribeOptimizationJobOutput { + s.OptimizationJobStatus = &v + return s +} + +// SetOptimizationOutput sets the OptimizationOutput field's value. 
+func (s *DescribeOptimizationJobOutput) SetOptimizationOutput(v *OptimizationOutput_) *DescribeOptimizationJobOutput { + s.OptimizationOutput = v + return s +} + +// SetOptimizationStartTime sets the OptimizationStartTime field's value. +func (s *DescribeOptimizationJobOutput) SetOptimizationStartTime(v time.Time) *DescribeOptimizationJobOutput { + s.OptimizationStartTime = &v + return s +} + +// SetOutputConfig sets the OutputConfig field's value. +func (s *DescribeOptimizationJobOutput) SetOutputConfig(v *OptimizationJobOutputConfig) *DescribeOptimizationJobOutput { + s.OutputConfig = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DescribeOptimizationJobOutput) SetRoleArn(v string) *DescribeOptimizationJobOutput { + s.RoleArn = &v + return s +} + +// SetStoppingCondition sets the StoppingCondition field's value. +func (s *DescribeOptimizationJobOutput) SetStoppingCondition(v *StoppingCondition) *DescribeOptimizationJobOutput { + s.StoppingCondition = v + return s +} + +// SetVpcConfig sets the VpcConfig field's value. +func (s *DescribeOptimizationJobOutput) SetVpcConfig(v *OptimizationVpcConfig) *DescribeOptimizationJobOutput { + s.VpcConfig = v + return s +} + type DescribePipelineDefinitionForExecutionInput struct { _ struct{} `type:"structure"` @@ -71557,6 +72742,10 @@ func (s *DomainDetails) SetUrl(v string) *DomainDetails { type DomainSettings struct { _ struct{} `type:"structure"` + // A collection of settings that configure the Amazon Q experience within the + // domain. The AuthMode that you use to create the domain must be SSO. + AmazonQSettings *AmazonQSettings `type:"structure"` + // A collection of settings that configure the domain's Docker interaction. DockerSettings *DockerSettings `type:"structure"` @@ -71606,6 +72795,12 @@ func (s *DomainSettings) Validate() error { return nil } +// SetAmazonQSettings sets the AmazonQSettings field's value. +func (s *DomainSettings) SetAmazonQSettings(v *AmazonQSettings) *DomainSettings { + s.AmazonQSettings = v + return s +} + // SetDockerSettings sets the DockerSettings field's value. func (s *DomainSettings) SetDockerSettings(v *DockerSettings) *DomainSettings { s.DockerSettings = v @@ -71634,6 +72829,10 @@ func (s *DomainSettings) SetSecurityGroupIds(v []*string) *DomainSettings { type DomainSettingsForUpdate struct { _ struct{} `type:"structure"` + // A collection of settings that configure the Amazon Q experience within the + // domain. + AmazonQSettings *AmazonQSettings `type:"structure"` + // A collection of settings that configure the domain's Docker interaction. DockerSettings *DockerSettings `type:"structure"` @@ -71685,6 +72884,12 @@ func (s *DomainSettingsForUpdate) Validate() error { return nil } +// SetAmazonQSettings sets the AmazonQSettings field's value. +func (s *DomainSettingsForUpdate) SetAmazonQSettings(v *AmazonQSettings) *DomainSettingsForUpdate { + s.AmazonQSettings = v + return s +} + // SetDockerSettings sets the DockerSettings field's value. func (s *DomainSettingsForUpdate) SetDockerSettings(v *DockerSettings) *DomainSettingsForUpdate { s.DockerSettings = v @@ -93656,6 +94861,194 @@ func (s *ListNotebookInstancesOutput) SetNotebookInstances(v []*NotebookInstance return s } +type ListOptimizationJobsInput struct { + _ struct{} `type:"structure"` + + // Filters the results to only those optimization jobs that were created after + // the specified time. 
+	CreationTimeAfter *time.Time `type:"timestamp"`
+
+	// Filters the results to only those optimization jobs that were created before
+	// the specified time.
+	CreationTimeBefore *time.Time `type:"timestamp"`
+
+	// Filters the results to only those optimization jobs that were updated after
+	// the specified time.
+	LastModifiedTimeAfter *time.Time `type:"timestamp"`
+
+	// Filters the results to only those optimization jobs that were updated before
+	// the specified time.
+	LastModifiedTimeBefore *time.Time `type:"timestamp"`
+
+	// The maximum number of optimization jobs to return in the response. The default
+	// is 50.
+	MaxResults *int64 `min:"1" type:"integer"`
+
+	// Filters the results to only those optimization jobs with a name that contains
+	// the specified string.
+	NameContains *string `type:"string"`
+
+	// A token that you use to get the next set of results following a truncated
+	// response. If the response to the previous request was truncated, that response
+	// provides the value for this token.
+	NextToken *string `type:"string"`
+
+	// Filters the results to only those optimization jobs that apply the specified
+	// optimization techniques. You can specify either Quantization or Compilation.
+	OptimizationContains *string `type:"string"`
+
+	// The field by which to sort the optimization jobs in the response. The default
+	// is CreationTime.
+	SortBy *string `type:"string" enum:"ListOptimizationJobsSortBy"`
+
+	// The sort order for results. The default is Ascending.
+	SortOrder *string `type:"string" enum:"SortOrder"`
+
+	// Filters the results to only those optimization jobs with the specified status.
+	StatusEquals *string `type:"string" enum:"OptimizationJobStatus"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListOptimizationJobsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListOptimizationJobsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListOptimizationJobsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListOptimizationJobsInput"}
+	if s.MaxResults != nil && *s.MaxResults < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetCreationTimeAfter sets the CreationTimeAfter field's value.
+func (s *ListOptimizationJobsInput) SetCreationTimeAfter(v time.Time) *ListOptimizationJobsInput {
+	s.CreationTimeAfter = &v
+	return s
+}
+
+// SetCreationTimeBefore sets the CreationTimeBefore field's value.
+func (s *ListOptimizationJobsInput) SetCreationTimeBefore(v time.Time) *ListOptimizationJobsInput {
+	s.CreationTimeBefore = &v
+	return s
+}
+
+// SetLastModifiedTimeAfter sets the LastModifiedTimeAfter field's value.
+func (s *ListOptimizationJobsInput) SetLastModifiedTimeAfter(v time.Time) *ListOptimizationJobsInput {
+	s.LastModifiedTimeAfter = &v
+	return s
+}
+
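A hedged sketch of driving these filters through a manual NextToken loop; svc is a *sagemaker.SageMaker client as in the earlier sketch, and the filter values are illustrative.

	input := (&sagemaker.ListOptimizationJobsInput{}).
		SetStatusEquals(sagemaker.OptimizationJobStatusCompleted).
		SetSortBy(sagemaker.ListOptimizationJobsSortByCreationTime).
		SetSortOrder(sagemaker.SortOrderDescending).
		SetMaxResults(50)

	for {
		page, err := svc.ListOptimizationJobs(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, job := range page.OptimizationJobSummaries {
			fmt.Println(aws.StringValue(job.OptimizationJobName),
				aws.StringValue(job.OptimizationJobStatus))
		}
		// An empty NextToken means the last page has been returned.
		if aws.StringValue(page.NextToken) == "" {
			break
		}
		input.SetNextToken(aws.StringValue(page.NextToken))
	}

+// SetLastModifiedTimeBefore sets the LastModifiedTimeBefore field's value.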
+func (s *ListOptimizationJobsInput) SetLastModifiedTimeBefore(v time.Time) *ListOptimizationJobsInput {
+	s.LastModifiedTimeBefore = &v
+	return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListOptimizationJobsInput) SetMaxResults(v int64) *ListOptimizationJobsInput {
+	s.MaxResults = &v
+	return s
+}
+
+// SetNameContains sets the NameContains field's value.
+func (s *ListOptimizationJobsInput) SetNameContains(v string) *ListOptimizationJobsInput {
+	s.NameContains = &v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListOptimizationJobsInput) SetNextToken(v string) *ListOptimizationJobsInput {
+	s.NextToken = &v
+	return s
+}
+
+// SetOptimizationContains sets the OptimizationContains field's value.
+func (s *ListOptimizationJobsInput) SetOptimizationContains(v string) *ListOptimizationJobsInput {
+	s.OptimizationContains = &v
+	return s
+}
+
+// SetSortBy sets the SortBy field's value.
+func (s *ListOptimizationJobsInput) SetSortBy(v string) *ListOptimizationJobsInput {
+	s.SortBy = &v
+	return s
+}
+
+// SetSortOrder sets the SortOrder field's value.
+func (s *ListOptimizationJobsInput) SetSortOrder(v string) *ListOptimizationJobsInput {
+	s.SortOrder = &v
+	return s
+}
+
+// SetStatusEquals sets the StatusEquals field's value.
+func (s *ListOptimizationJobsInput) SetStatusEquals(v string) *ListOptimizationJobsInput {
+	s.StatusEquals = &v
+	return s
+}
+
+type ListOptimizationJobsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The token to use in a subsequent request to get the next set of results following
+	// a truncated response.
+	NextToken *string `type:"string"`
+
+	// A list of optimization jobs and their properties that match any of the
+	// filters you specified in the request.
+	//
+	// OptimizationJobSummaries is a required field
+	OptimizationJobSummaries []*OptimizationJobSummary `type:"list" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListOptimizationJobsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListOptimizationJobsOutput) GoString() string {
+	return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListOptimizationJobsOutput) SetNextToken(v string) *ListOptimizationJobsOutput {
+	s.NextToken = &v
+	return s
+}
+
+// SetOptimizationJobSummaries sets the OptimizationJobSummaries field's value.
+func (s *ListOptimizationJobsOutput) SetOptimizationJobSummaries(v []*OptimizationJobSummary) *ListOptimizationJobsOutput {
+	s.OptimizationJobSummaries = v
+	return s
+}
+
 type ListPipelineExecutionStepsInput struct {
 	_ struct{} `type:"structure"`
 
@@ -98023,6 +99416,49 @@ func (s *ModelClientConfig) SetInvocationsTimeoutInSeconds(v int64) *ModelClient
 	return s
 }
 
+// Settings for the model compilation technique that's applied by a model optimization
+// job.
+type ModelCompilationConfig struct {
+	_ struct{} `type:"structure"`
+
+	// The URI of an LMI DLC in Amazon ECR. SageMaker uses this image to run the
+	// optimization.
+ Image *string `type:"string"` + + // Environment variables that override the default ones in the model container. + OverrideEnvironment map[string]*string `type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ModelCompilationConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ModelCompilationConfig) GoString() string { + return s.String() +} + +// SetImage sets the Image field's value. +func (s *ModelCompilationConfig) SetImage(v string) *ModelCompilationConfig { + s.Image = &v + return s +} + +// SetOverrideEnvironment sets the OverrideEnvironment field's value. +func (s *ModelCompilationConfig) SetOverrideEnvironment(v map[string]*string) *ModelCompilationConfig { + s.OverrideEnvironment = v + return s +} + // Defines the model configuration. Includes the specification name and environment // parameters. type ModelConfiguration struct { @@ -100842,6 +102278,49 @@ func (s *ModelQualityJobInput) SetGroundTruthS3Input(v *MonitoringGroundTruthS3I return s } +// Settings for the model quantization technique that's applied by a model optimization +// job. +type ModelQuantizationConfig struct { + _ struct{} `type:"structure"` + + // The URI of an LMI DLC in Amazon ECR. SageMaker uses this image to run the + // optimization. + Image *string `type:"string"` + + // Environment variables that override the default ones in the model container. + OverrideEnvironment map[string]*string `type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ModelQuantizationConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ModelQuantizationConfig) GoString() string { + return s.String() +} + +// SetImage sets the Image field's value. +func (s *ModelQuantizationConfig) SetImage(v string) *ModelQuantizationConfig { + s.Image = &v + return s +} + +// SetOverrideEnvironment sets the OverrideEnvironment field's value. +func (s *ModelQuantizationConfig) SetOverrideEnvironment(v map[string]*string) *ModelQuantizationConfig { + s.OverrideEnvironment = v + return s +} + // The model registry settings for the SageMaker Canvas application. type ModelRegisterSettings struct { _ struct{} `type:"structure"` @@ -104280,6 +105759,491 @@ func (s *OnlineStoreSecurityConfig) SetKmsKeyId(v string) *OnlineStoreSecurityCo return s } +// Settings for an optimization technique that you apply with a model optimization +// job. +type OptimizationConfig struct { + _ struct{} `type:"structure"` + + // Settings for the model compilation technique that's applied by a model optimization + // job. 
+ ModelCompilationConfig *ModelCompilationConfig `type:"structure"` + + // Settings for the model quantization technique that's applied by a model optimization + // job. + ModelQuantizationConfig *ModelQuantizationConfig `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OptimizationConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OptimizationConfig) GoString() string { + return s.String() +} + +// SetModelCompilationConfig sets the ModelCompilationConfig field's value. +func (s *OptimizationConfig) SetModelCompilationConfig(v *ModelCompilationConfig) *OptimizationConfig { + s.ModelCompilationConfig = v + return s +} + +// SetModelQuantizationConfig sets the ModelQuantizationConfig field's value. +func (s *OptimizationConfig) SetModelQuantizationConfig(v *ModelQuantizationConfig) *OptimizationConfig { + s.ModelQuantizationConfig = v + return s +} + +// The location of the source model to optimize with an optimization job. +type OptimizationJobModelSource struct { + _ struct{} `type:"structure"` + + // The Amazon S3 location of a source model to optimize with an optimization + // job. + S3 *OptimizationJobModelSourceS3 `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OptimizationJobModelSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OptimizationJobModelSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OptimizationJobModelSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OptimizationJobModelSource"} + if s.S3 != nil { + if err := s.S3.Validate(); err != nil { + invalidParams.AddNested("S3", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3 sets the S3 field's value. +func (s *OptimizationJobModelSource) SetS3(v *OptimizationJobModelSourceS3) *OptimizationJobModelSource { + s.S3 = v + return s +} + +// The Amazon S3 location of a source model to optimize with an optimization +// job. +type OptimizationJobModelSourceS3 struct { + _ struct{} `type:"structure"` + + // The access configuration settings for the source ML model for an optimization + // job, where you can accept the model end-user license agreement (EULA). + ModelAccessConfig *OptimizationModelAccessConfig `type:"structure"` + + // An Amazon S3 URI that locates a source model to optimize with an optimization + // job. + S3Uri *string `type:"string"` +} + +// String returns the string representation. 
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s OptimizationJobModelSourceS3) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s OptimizationJobModelSourceS3) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *OptimizationJobModelSourceS3) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "OptimizationJobModelSourceS3"}
+	if s.ModelAccessConfig != nil {
+		if err := s.ModelAccessConfig.Validate(); err != nil {
+			invalidParams.AddNested("ModelAccessConfig", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetModelAccessConfig sets the ModelAccessConfig field's value.
+func (s *OptimizationJobModelSourceS3) SetModelAccessConfig(v *OptimizationModelAccessConfig) *OptimizationJobModelSourceS3 {
+	s.ModelAccessConfig = v
+	return s
+}
+
+// SetS3Uri sets the S3Uri field's value.
+func (s *OptimizationJobModelSourceS3) SetS3Uri(v string) *OptimizationJobModelSourceS3 {
+	s.S3Uri = &v
+	return s
+}
+
+// Details for where to store the optimized model that you create with the optimization
+// job.
+type OptimizationJobOutputConfig struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of a key in Amazon Web Services KMS. SageMaker
+	// uses the key to encrypt the artifacts of the optimized model when SageMaker
+	// uploads the model to Amazon S3.
+	KmsKeyId *string `type:"string"`
+
+	// The Amazon S3 URI for where to store the optimized model that you create
+	// with an optimization job.
+	//
+	// S3OutputLocation is a required field
+	S3OutputLocation *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s OptimizationJobOutputConfig) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s OptimizationJobOutputConfig) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *OptimizationJobOutputConfig) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "OptimizationJobOutputConfig"}
+	if s.S3OutputLocation == nil {
+		invalidParams.Add(request.NewErrParamRequired("S3OutputLocation"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetKmsKeyId sets the KmsKeyId field's value.
+func (s *OptimizationJobOutputConfig) SetKmsKeyId(v string) *OptimizationJobOutputConfig {
+	s.KmsKeyId = &v
+	return s
+}
+
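A short illustrative sketch, with placeholder URIs and key ARN: accepting a gated model's EULA on the S3 source and encrypting the optimized artifacts with a customer managed KMS key.

	source := (&sagemaker.OptimizationJobModelSource{}).
		SetS3((&sagemaker.OptimizationJobModelSourceS3{}).
			SetS3Uri("s3://amzn-s3-demo-bucket/gated-model/"). // hypothetical bucket
			SetModelAccessConfig((&sagemaker.OptimizationModelAccessConfig{}).
				SetAcceptEula(true))) // EULA acceptance must be explicit

	outputCfg := (&sagemaker.OptimizationJobOutputConfig{}).
		SetS3OutputLocation("s3://amzn-s3-demo-bucket/optimized/").          // hypothetical bucket
		SetKmsKeyId("arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID") // hypothetical key

	// Both values plug into CreateOptimizationJobInput via SetModelSource
	// and SetOutputConfig.

+// SetS3OutputLocation sets the S3OutputLocation field's value.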
+func (s *OptimizationJobOutputConfig) SetS3OutputLocation(v string) *OptimizationJobOutputConfig { + s.S3OutputLocation = &v + return s +} + +// Summarizes an optimization job by providing some of its key properties. +type OptimizationJobSummary struct { + _ struct{} `type:"structure"` + + // The time when you created the optimization job. + // + // CreationTime is a required field + CreationTime *time.Time `type:"timestamp" required:"true"` + + // The type of instance that hosts the optimized model that you create with + // the optimization job. + // + // DeploymentInstanceType is a required field + DeploymentInstanceType *string `type:"string" required:"true" enum:"OptimizationJobDeploymentInstanceType"` + + // The time when the optimization job was last updated. + LastModifiedTime *time.Time `type:"timestamp"` + + // The time when the optimization job finished processing. + OptimizationEndTime *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the optimization job. + // + // OptimizationJobArn is a required field + OptimizationJobArn *string `type:"string" required:"true"` + + // The name that you assigned to the optimization job. + // + // OptimizationJobName is a required field + OptimizationJobName *string `min:"1" type:"string" required:"true"` + + // The current status of the optimization job. + // + // OptimizationJobStatus is a required field + OptimizationJobStatus *string `type:"string" required:"true" enum:"OptimizationJobStatus"` + + // The time when the optimization job started. + OptimizationStartTime *time.Time `type:"timestamp"` + + // The optimization techniques that are applied by the optimization job. + // + // OptimizationTypes is a required field + OptimizationTypes []*string `type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OptimizationJobSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OptimizationJobSummary) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *OptimizationJobSummary) SetCreationTime(v time.Time) *OptimizationJobSummary { + s.CreationTime = &v + return s +} + +// SetDeploymentInstanceType sets the DeploymentInstanceType field's value. +func (s *OptimizationJobSummary) SetDeploymentInstanceType(v string) *OptimizationJobSummary { + s.DeploymentInstanceType = &v + return s +} + +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *OptimizationJobSummary) SetLastModifiedTime(v time.Time) *OptimizationJobSummary { + s.LastModifiedTime = &v + return s +} + +// SetOptimizationEndTime sets the OptimizationEndTime field's value. +func (s *OptimizationJobSummary) SetOptimizationEndTime(v time.Time) *OptimizationJobSummary { + s.OptimizationEndTime = &v + return s +} + +// SetOptimizationJobArn sets the OptimizationJobArn field's value. +func (s *OptimizationJobSummary) SetOptimizationJobArn(v string) *OptimizationJobSummary { + s.OptimizationJobArn = &v + return s +} + +// SetOptimizationJobName sets the OptimizationJobName field's value. 
+func (s *OptimizationJobSummary) SetOptimizationJobName(v string) *OptimizationJobSummary { + s.OptimizationJobName = &v + return s +} + +// SetOptimizationJobStatus sets the OptimizationJobStatus field's value. +func (s *OptimizationJobSummary) SetOptimizationJobStatus(v string) *OptimizationJobSummary { + s.OptimizationJobStatus = &v + return s +} + +// SetOptimizationStartTime sets the OptimizationStartTime field's value. +func (s *OptimizationJobSummary) SetOptimizationStartTime(v time.Time) *OptimizationJobSummary { + s.OptimizationStartTime = &v + return s +} + +// SetOptimizationTypes sets the OptimizationTypes field's value. +func (s *OptimizationJobSummary) SetOptimizationTypes(v []*string) *OptimizationJobSummary { + s.OptimizationTypes = v + return s +} + +// The access configuration settings for the source ML model for an optimization +// job, where you can accept the model end-user license agreement (EULA). +type OptimizationModelAccessConfig struct { + _ struct{} `type:"structure"` + + // Specifies agreement to the model end-user license agreement (EULA). The AcceptEula + // value must be explicitly defined as True in order to accept the EULA that + // this model requires. You are responsible for reviewing and complying with + // any applicable license terms and making sure they are acceptable for your + // use case before downloading or using a model. + // + // AcceptEula is a required field + AcceptEula *bool `type:"boolean" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OptimizationModelAccessConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OptimizationModelAccessConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OptimizationModelAccessConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OptimizationModelAccessConfig"} + if s.AcceptEula == nil { + invalidParams.Add(request.NewErrParamRequired("AcceptEula")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcceptEula sets the AcceptEula field's value. +func (s *OptimizationModelAccessConfig) SetAcceptEula(v bool) *OptimizationModelAccessConfig { + s.AcceptEula = &v + return s +} + +// Output values produced by an optimization job. +type OptimizationOutput_ struct { + _ struct{} `type:"structure"` + + // The image that SageMaker recommends that you use to host the optimized model + // that you created with an optimization job. + RecommendedInferenceImage *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OptimizationOutput_) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s OptimizationOutput_) GoString() string {
+	return s.String()
+}
+
+// SetRecommendedInferenceImage sets the RecommendedInferenceImage field's value.
+func (s *OptimizationOutput_) SetRecommendedInferenceImage(v string) *OptimizationOutput_ {
+	s.RecommendedInferenceImage = &v
+	return s
+}
+
+// A VPC in Amazon VPC that's accessible to an optimized model that you create
+// with an optimization job. You can control access to and from your resources
+// by configuring a VPC. For more information, see Give SageMaker Access to
+// Resources in your Amazon VPC (https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html).
+type OptimizationVpcConfig struct {
+	_ struct{} `type:"structure"`
+
+	// The VPC security group IDs, in the form sg-xxxxxxxx. Specify the security
+	// groups for the VPC that is specified in the Subnets field.
+	//
+	// SecurityGroupIds is a required field
+	SecurityGroupIds []*string `min:"1" type:"list" required:"true"`
+
+	// The IDs of the subnets in the VPC to which you want to connect your optimized
+	// model.
+	//
+	// Subnets is a required field
+	Subnets []*string `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s OptimizationVpcConfig) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s OptimizationVpcConfig) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *OptimizationVpcConfig) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "OptimizationVpcConfig"}
+	if s.SecurityGroupIds == nil {
+		invalidParams.Add(request.NewErrParamRequired("SecurityGroupIds"))
+	}
+	if s.SecurityGroupIds != nil && len(s.SecurityGroupIds) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("SecurityGroupIds", 1))
+	}
+	if s.Subnets == nil {
+		invalidParams.Add(request.NewErrParamRequired("Subnets"))
+	}
+	if s.Subnets != nil && len(s.Subnets) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Subnets", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetSecurityGroupIds sets the SecurityGroupIds field's value.
+func (s *OptimizationVpcConfig) SetSecurityGroupIds(v []*string) *OptimizationVpcConfig {
+	s.SecurityGroupIds = v
+	return s
+}
+
+// SetSubnets sets the Subnets field's value.
+func (s *OptimizationVpcConfig) SetSubnets(v []*string) *OptimizationVpcConfig {
+	s.Subnets = v
+	return s
+}
+
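A brief sketch of the VPC wiring; the security group and subnet IDs are placeholders, and Validate enforces the non-empty minimums declared above.

	vpc := (&sagemaker.OptimizationVpcConfig{}).
		SetSecurityGroupIds(aws.StringSlice([]string{"sg-0123456789abcdef0"})). // hypothetical ID
		SetSubnets(aws.StringSlice([]string{"subnet-0123456789abcdef0"}))       // hypothetical ID

	if err := vpc.Validate(); err != nil {
		log.Fatal(err)
	}
	// Attach it to the request with input.SetVpcConfig(vpc).

 // Contains information about the output location for the compiled model and
 // the target device that the model runs on.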
TargetDevice and TargetPlatform
	// are mutually exclusive, so you need to choose one of the two to specify
@@ -116938,6 +118902,77 @@ func (s StopNotebookInstanceOutput) GoString() string {
 	return s.String()
 }
 
+type StopOptimizationJobInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name that you assigned to the optimization job.
+	//
+	// OptimizationJobName is a required field
+	OptimizationJobName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s StopOptimizationJobInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s StopOptimizationJobInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StopOptimizationJobInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "StopOptimizationJobInput"}
+	if s.OptimizationJobName == nil {
+		invalidParams.Add(request.NewErrParamRequired("OptimizationJobName"))
+	}
+	if s.OptimizationJobName != nil && len(*s.OptimizationJobName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("OptimizationJobName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetOptimizationJobName sets the OptimizationJobName field's value.
+func (s *StopOptimizationJobInput) SetOptimizationJobName(v string) *StopOptimizationJobInput {
+	s.OptimizationJobName = &v
+	return s
+}
+
+type StopOptimizationJobOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s StopOptimizationJobOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s StopOptimizationJobOutput) GoString() string {
+	return s.String()
+}
+
 type StopPipelineExecutionInput struct {
 	_ struct{} `type:"structure"`
 
@@ -117241,10 +119276,8 @@ func (s StopTransformJobOutput) GoString() string {
 	return s.String()
 }
 
-// Specifies a limit to how long a model training job or model compilation job
-// can run. It also specifies how long a managed spot training job has to complete.
-// When the job reaches the time limit, SageMaker ends the training or compilation
-// job. Use this API to cap model training costs.
+// Specifies a limit to how long a job can run. When the job reaches the time
+// limit, SageMaker ends the job. Use this API to cap costs.
 //
 // To stop a training job, SageMaker sends the algorithm the SIGTERM signal,
 // which delays job termination for 120 seconds. Algorithms can use this 120-second
@@ -133751,6 +135784,26 @@ func ListLabelingJobsForWorkteamSortByOptions_Values() []string {
 	}
 }
 
+const (
+	// ListOptimizationJobsSortByName is a ListOptimizationJobsSortBy enum value
+	ListOptimizationJobsSortByName = "Name"
+
+	// ListOptimizationJobsSortByCreationTime is a ListOptimizationJobsSortBy enum value
+	ListOptimizationJobsSortByCreationTime = "CreationTime"
+
+	// ListOptimizationJobsSortByStatus is a ListOptimizationJobsSortBy enum value
+	ListOptimizationJobsSortByStatus = "Status"
+)
+
+// ListOptimizationJobsSortBy_Values returns all elements of the ListOptimizationJobsSortBy enum
+func ListOptimizationJobsSortBy_Values() []string {
+	return []string{
+		ListOptimizationJobsSortByName,
+		ListOptimizationJobsSortByCreationTime,
+		ListOptimizationJobsSortByStatus,
+	}
+}
+
 const (
 	// ListWorkforcesSortByOptionsName is a ListWorkforcesSortByOptions enum value
 	ListWorkforcesSortByOptionsName = "Name"
@@ -134676,6 +136729,150 @@ func Operator_Values() []string {
 	}
 }
 
+const (
+	// OptimizationJobDeploymentInstanceTypeMlP4d24xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlP4d24xlarge = "ml.p4d.24xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlP4de24xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlP4de24xlarge = "ml.p4de.24xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlP548xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlP548xlarge = "ml.p5.48xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlG5Xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlG5Xlarge = "ml.g5.xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlG52xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlG52xlarge = "ml.g5.2xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlG54xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlG54xlarge = "ml.g5.4xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlG58xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlG58xlarge = "ml.g5.8xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlG512xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlG512xlarge = "ml.g5.12xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlG516xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlG516xlarge = "ml.g5.16xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlG524xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlG524xlarge = "ml.g5.24xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlG548xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlG548xlarge = "ml.g5.48xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlG6Xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlG6Xlarge = "ml.g6.xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlG62xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlG62xlarge = "ml.g6.2xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlG64xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlG64xlarge = "ml.g6.4xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlG68xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlG68xlarge = "ml.g6.8xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlG612xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlG612xlarge = "ml.g6.12xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlG616xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlG616xlarge = "ml.g6.16xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlG624xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlG624xlarge = "ml.g6.24xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlG648xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlG648xlarge = "ml.g6.48xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlInf2Xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlInf2Xlarge = "ml.inf2.xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlInf28xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlInf28xlarge = "ml.inf2.8xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlInf224xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlInf224xlarge = "ml.inf2.24xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlInf248xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlInf248xlarge = "ml.inf2.48xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlTrn12xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlTrn12xlarge = "ml.trn1.2xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlTrn132xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlTrn132xlarge = "ml.trn1.32xlarge"
+
+	// OptimizationJobDeploymentInstanceTypeMlTrn1n32xlarge is a OptimizationJobDeploymentInstanceType enum value
+	OptimizationJobDeploymentInstanceTypeMlTrn1n32xlarge = "ml.trn1n.32xlarge"
+)
+
+// OptimizationJobDeploymentInstanceType_Values returns all elements of the OptimizationJobDeploymentInstanceType enum
+func OptimizationJobDeploymentInstanceType_Values() []string {
+	return []string{
+		OptimizationJobDeploymentInstanceTypeMlP4d24xlarge,
+		OptimizationJobDeploymentInstanceTypeMlP4de24xlarge,
+		OptimizationJobDeploymentInstanceTypeMlP548xlarge,
+		OptimizationJobDeploymentInstanceTypeMlG5Xlarge,
+		OptimizationJobDeploymentInstanceTypeMlG52xlarge,
+		OptimizationJobDeploymentInstanceTypeMlG54xlarge,
+		OptimizationJobDeploymentInstanceTypeMlG58xlarge,
+		OptimizationJobDeploymentInstanceTypeMlG512xlarge,
+		OptimizationJobDeploymentInstanceTypeMlG516xlarge,
+		OptimizationJobDeploymentInstanceTypeMlG524xlarge,
+		OptimizationJobDeploymentInstanceTypeMlG548xlarge,
+		OptimizationJobDeploymentInstanceTypeMlG6Xlarge,
+		OptimizationJobDeploymentInstanceTypeMlG62xlarge,
+		OptimizationJobDeploymentInstanceTypeMlG64xlarge,
+		OptimizationJobDeploymentInstanceTypeMlG68xlarge,
+		OptimizationJobDeploymentInstanceTypeMlG612xlarge,
+		OptimizationJobDeploymentInstanceTypeMlG616xlarge,
+		OptimizationJobDeploymentInstanceTypeMlG624xlarge,
+		OptimizationJobDeploymentInstanceTypeMlG648xlarge,
+		OptimizationJobDeploymentInstanceTypeMlInf2Xlarge,
+		OptimizationJobDeploymentInstanceTypeMlInf28xlarge,
+		OptimizationJobDeploymentInstanceTypeMlInf224xlarge,
+		OptimizationJobDeploymentInstanceTypeMlInf248xlarge,
+		OptimizationJobDeploymentInstanceTypeMlTrn12xlarge,
+		OptimizationJobDeploymentInstanceTypeMlTrn132xlarge,
+		OptimizationJobDeploymentInstanceTypeMlTrn1n32xlarge,
+	}
+}
+
+const (
+	// OptimizationJobStatusInprogress is a OptimizationJobStatus enum value
+	OptimizationJobStatusInprogress = "INPROGRESS"
+
+	// OptimizationJobStatusCompleted is a OptimizationJobStatus enum value
+	OptimizationJobStatusCompleted = "COMPLETED"
+
+	// OptimizationJobStatusFailed is a OptimizationJobStatus enum value
+	OptimizationJobStatusFailed = "FAILED"
+
+	// OptimizationJobStatusStarting is a OptimizationJobStatus enum value
+	OptimizationJobStatusStarting = "STARTING"
+
+	// OptimizationJobStatusStopping is a OptimizationJobStatus enum value
+	OptimizationJobStatusStopping = "STOPPING"
+
+	// OptimizationJobStatusStopped is a OptimizationJobStatus enum value
+	OptimizationJobStatusStopped = "STOPPED"
+)
+
+// OptimizationJobStatus_Values returns all elements of the OptimizationJobStatus enum
+func OptimizationJobStatus_Values() []string {
+	return []string{
+		OptimizationJobStatusInprogress,
+		OptimizationJobStatusCompleted,
+		OptimizationJobStatusFailed,
+		OptimizationJobStatusStarting,
+		OptimizationJobStatusStopping,
+		OptimizationJobStatusStopped,
+	}
+}
+
 const (
 	// OrderKeyAscending is a OrderKey enum value
 	OrderKeyAscending = "Ascending"
diff --git a/service/sagemaker/sagemakeriface/interface.go b/service/sagemaker/sagemakeriface/interface.go
index 9cb7cc386c1..599ab4e398b 100644
--- a/service/sagemaker/sagemakeriface/interface.go
+++ b/service/sagemaker/sagemakeriface/interface.go
@@ -252,6 +252,10 @@ type SageMakerAPI interface {
 	CreateNotebookInstanceLifecycleConfigWithContext(aws.Context, *sagemaker.CreateNotebookInstanceLifecycleConfigInput, ...request.Option) (*sagemaker.CreateNotebookInstanceLifecycleConfigOutput, error)
 	CreateNotebookInstanceLifecycleConfigRequest(*sagemaker.CreateNotebookInstanceLifecycleConfigInput) (*request.Request, *sagemaker.CreateNotebookInstanceLifecycleConfigOutput)
 
+	CreateOptimizationJob(*sagemaker.CreateOptimizationJobInput) (*sagemaker.CreateOptimizationJobOutput, error)
+	CreateOptimizationJobWithContext(aws.Context, *sagemaker.CreateOptimizationJobInput, ...request.Option) (*sagemaker.CreateOptimizationJobOutput, error)
+	CreateOptimizationJobRequest(*sagemaker.CreateOptimizationJobInput) (*request.Request, *sagemaker.CreateOptimizationJobOutput)
+
 	CreatePipeline(*sagemaker.CreatePipelineInput) (*sagemaker.CreatePipelineOutput, error)
 	CreatePipelineWithContext(aws.Context, *sagemaker.CreatePipelineInput, ...request.Option) (*sagemaker.CreatePipelineOutput, error)
 	CreatePipelineRequest(*sagemaker.CreatePipelineInput) (*request.Request, *sagemaker.CreatePipelineOutput)
@@ -476,6 +480,10 @@ type SageMakerAPI interface {
 	DeleteNotebookInstanceLifecycleConfigWithContext(aws.Context, *sagemaker.DeleteNotebookInstanceLifecycleConfigInput, ...request.Option) (*sagemaker.DeleteNotebookInstanceLifecycleConfigOutput, error)
 	DeleteNotebookInstanceLifecycleConfigRequest(*sagemaker.DeleteNotebookInstanceLifecycleConfigInput) (*request.Request, *sagemaker.DeleteNotebookInstanceLifecycleConfigOutput)
 
+	DeleteOptimizationJob(*sagemaker.DeleteOptimizationJobInput) (*sagemaker.DeleteOptimizationJobOutput, error)
+	DeleteOptimizationJobWithContext(aws.Context, *sagemaker.DeleteOptimizationJobInput, ...request.Option) (*sagemaker.DeleteOptimizationJobOutput, error)
+	DeleteOptimizationJobRequest(*sagemaker.DeleteOptimizationJobInput) (*request.Request, *sagemaker.DeleteOptimizationJobOutput)
+
 	DeletePipeline(*sagemaker.DeletePipelineInput) (*sagemaker.DeletePipelineOutput, error)
 	DeletePipelineWithContext(aws.Context, *sagemaker.DeletePipelineInput, ...request.Option) (*sagemaker.DeletePipelineOutput, error)
 	DeletePipelineRequest(*sagemaker.DeletePipelineInput) (*request.Request, *sagemaker.DeletePipelineOutput)
@@ -708,6 +716,10 @@ type SageMakerAPI interface {
 	DescribeNotebookInstanceLifecycleConfigWithContext(aws.Context, *sagemaker.DescribeNotebookInstanceLifecycleConfigInput, ...request.Option) (*sagemaker.DescribeNotebookInstanceLifecycleConfigOutput, error)
 	DescribeNotebookInstanceLifecycleConfigRequest(*sagemaker.DescribeNotebookInstanceLifecycleConfigInput) (*request.Request, *sagemaker.DescribeNotebookInstanceLifecycleConfigOutput)
 
+	DescribeOptimizationJob(*sagemaker.DescribeOptimizationJobInput) (*sagemaker.DescribeOptimizationJobOutput, error)
+	DescribeOptimizationJobWithContext(aws.Context, *sagemaker.DescribeOptimizationJobInput, ...request.Option) (*sagemaker.DescribeOptimizationJobOutput, error)
+	DescribeOptimizationJobRequest(*sagemaker.DescribeOptimizationJobInput) (*request.Request, *sagemaker.DescribeOptimizationJobOutput)
+
 	DescribePipeline(*sagemaker.DescribePipelineInput) (*sagemaker.DescribePipelineOutput, error)
 	DescribePipelineWithContext(aws.Context, *sagemaker.DescribePipelineInput, ...request.Option) (*sagemaker.DescribePipelineOutput, error)
 	DescribePipelineRequest(*sagemaker.DescribePipelineInput) (*request.Request, *sagemaker.DescribePipelineOutput)
@@ -1191,6 +1203,13 @@ type SageMakerAPI interface {
 	ListNotebookInstancesPages(*sagemaker.ListNotebookInstancesInput, func(*sagemaker.ListNotebookInstancesOutput, bool) bool) error
 	ListNotebookInstancesPagesWithContext(aws.Context, *sagemaker.ListNotebookInstancesInput, func(*sagemaker.ListNotebookInstancesOutput, bool) bool, ...request.Option) error
 
+	ListOptimizationJobs(*sagemaker.ListOptimizationJobsInput) (*sagemaker.ListOptimizationJobsOutput, error)
+	ListOptimizationJobsWithContext(aws.Context, *sagemaker.ListOptimizationJobsInput, ...request.Option) (*sagemaker.ListOptimizationJobsOutput, error)
+	ListOptimizationJobsRequest(*sagemaker.ListOptimizationJobsInput) (*request.Request, *sagemaker.ListOptimizationJobsOutput)
+
+	ListOptimizationJobsPages(*sagemaker.ListOptimizationJobsInput, func(*sagemaker.ListOptimizationJobsOutput, bool) bool) error
+	ListOptimizationJobsPagesWithContext(aws.Context, *sagemaker.ListOptimizationJobsInput, func(*sagemaker.ListOptimizationJobsOutput, bool) bool, ...request.Option) error
+
 	ListPipelineExecutionSteps(*sagemaker.ListPipelineExecutionStepsInput) (*sagemaker.ListPipelineExecutionStepsOutput, error)
 	ListPipelineExecutionStepsWithContext(aws.Context, *sagemaker.ListPipelineExecutionStepsInput, ...request.Option) (*sagemaker.ListPipelineExecutionStepsOutput, error)
 	ListPipelineExecutionStepsRequest(*sagemaker.ListPipelineExecutionStepsInput) (*request.Request, *sagemaker.ListPipelineExecutionStepsOutput)
@@ -1437,6 +1456,10 @@ type SageMakerAPI interface {
 	StopNotebookInstanceWithContext(aws.Context, *sagemaker.StopNotebookInstanceInput, ...request.Option) (*sagemaker.StopNotebookInstanceOutput, error)
 	StopNotebookInstanceRequest(*sagemaker.StopNotebookInstanceInput) (*request.Request, *sagemaker.StopNotebookInstanceOutput)
 
+	StopOptimizationJob(*sagemaker.StopOptimizationJobInput) (*sagemaker.StopOptimizationJobOutput, error)
+	StopOptimizationJobWithContext(aws.Context, *sagemaker.StopOptimizationJobInput, ...request.Option) (*sagemaker.StopOptimizationJobOutput, error)
+	StopOptimizationJobRequest(*sagemaker.StopOptimizationJobInput) (*request.Request, *sagemaker.StopOptimizationJobOutput)
+
 	StopPipelineExecution(*sagemaker.StopPipelineExecutionInput) (*sagemaker.StopPipelineExecutionOutput, error)
 	StopPipelineExecutionWithContext(aws.Context, *sagemaker.StopPipelineExecutionInput, ...request.Option) (*sagemaker.StopPipelineExecutionOutput, error)
 	StopPipelineExecutionRequest(*sagemaker.StopPipelineExecutionInput) (*request.Request, *sagemaker.StopPipelineExecutionOutput)
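
Taken together, the hunks above add a complete operation set for optimization jobs: request/response shapes with client-side validation, sort and status enums, paginators, and the interface plumbing. The following is a minimal usage sketch against this SDK version. The region, the job name, the SortBy field on ListOptimizationJobsInput, and the OptimizationJobStatus field on the Describe output are illustrative assumptions; only the method names and constants shown in the diff are taken as given.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
	"github.com/aws/aws-sdk-go/service/sagemaker/sagemakeriface"
)

// isTerminal reports whether an optimization job has stopped moving. The
// status set is read off the OptimizationJobStatus constants in this release;
// treating COMPLETED, FAILED, and STOPPED as terminal is an inference from
// the enum, not documented behavior.
func isTerminal(status string) bool {
	switch status {
	case sagemaker.OptimizationJobStatusCompleted,
		sagemaker.OptimizationJobStatusFailed,
		sagemaker.OptimizationJobStatusStopped:
		return true
	}
	return false
}

// stopJob is written against the SageMakerAPI interface extended above, so
// tests can substitute a fake for the real client.
func stopJob(api sagemakeriface.SageMakerAPI, name string) error {
	_, err := api.StopOptimizationJob(&sagemaker.StopOptimizationJobInput{
		OptimizationJobName: aws.String(name), // Validate() enforces min length 1
	})
	return err
}

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-west-2"), // assumed region
	}))
	client := sagemaker.New(sess)

	// Page through optimization jobs with the new paginator. SortBy is
	// assumed to accept the ListOptimizationJobsSortBy values defined in
	// this release.
	input := &sagemaker.ListOptimizationJobsInput{
		SortBy: aws.String(sagemaker.ListOptimizationJobsSortByCreationTime),
	}
	if err := client.ListOptimizationJobsPages(input,
		func(page *sagemaker.ListOptimizationJobsOutput, lastPage bool) bool {
			fmt.Println(page) // String() pretty-prints via awsutil.Prettify
			return true       // keep paging
		}); err != nil {
		log.Fatal(err)
	}

	// Check one job's status; the OptimizationJobStatus field on the
	// Describe output is an assumption, since only the status constants
	// themselves appear in this diff.
	out, err := client.DescribeOptimizationJob(&sagemaker.DescribeOptimizationJobInput{
		OptimizationJobName: aws.String("my-optimization-job"), // hypothetical name
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("terminal:", isTerminal(aws.StringValue(out.OptimizationJobStatus)))

	if err := stopJob(client, "my-optimization-job"); err != nil {
		log.Fatal(err)
	}
}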
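
Because the new methods are also declared on sagemakeriface.SageMakerAPI, the stock aws-sdk-go mocking pattern applies: embed the interface in a fake and override only the methods the test exercises; any unimplemented method panics if called, which acts as a tripwire. A sketch, with the type and test names being hypothetical:

package sagemakertest

import (
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
	"github.com/aws/aws-sdk-go/service/sagemaker/sagemakeriface"
)

// fakeSageMaker overrides only StopOptimizationJob; the embedded interface
// satisfies the rest of SageMakerAPI.
type fakeSageMaker struct {
	sagemakeriface.SageMakerAPI
	stopped []string // names passed to StopOptimizationJob
}

func (f *fakeSageMaker) StopOptimizationJob(in *sagemaker.StopOptimizationJobInput) (*sagemaker.StopOptimizationJobOutput, error) {
	f.stopped = append(f.stopped, aws.StringValue(in.OptimizationJobName))
	return &sagemaker.StopOptimizationJobOutput{}, nil
}

func TestStopOptimizationJob(t *testing.T) {
	f := &fakeSageMaker{}
	if _, err := f.StopOptimizationJob(&sagemaker.StopOptimizationJobInput{
		OptimizationJobName: aws.String("job-under-test"), // hypothetical name
	}); err != nil {
		t.Fatal(err)
	}
	if len(f.stopped) != 1 || f.stopped[0] != "job-under-test" {
		t.Fatalf("unexpected calls: %v", f.stopped)
	}
}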