diff --git a/CHANGELOG.md b/CHANGELOG.md index cc7399708a0..79ebd43d8cc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,12 @@ +Release v1.50.10 (2024-02-02) +=== + +### Service Client Updates +* `service/dynamodb`: Updates service API, documentation, waiters, paginators, and examples + * Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account. +* `service/sagemaker`: Updates service API and documentation + * Amazon SageMaker Canvas adds GenerativeAiSettings support for CanvasAppSettings. + Release v1.50.9 (2024-02-01) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 10137074c64..530f957bbeb 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -29047,12 +29047,18 @@ var awsPartition = partition{ }, "sms-voice": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -29086,6 +29092,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-2", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-south-2", }: endpoint{}, @@ -29095,6 +29107,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -29113,6 +29128,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sms-voice-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -29128,6 +29161,12 @@ var awsPartition = partition{ endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -29137,6 +29176,24 @@ var awsPartition = partition{ }: endpoint{ Hostname: "sms-voice-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -42319,6 +42376,15 @@ var awsusgovPartition = partition{ }, "sms-voice": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -42328,6 +42394,15 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: 
"us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, diff --git a/aws/version.go b/aws/version.go index 8f06625b54b..71175338077 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.50.9" +const SDKVersion = "1.50.10" diff --git a/models/apis/dynamodb/2012-08-10/docs-2.json b/models/apis/dynamodb/2012-08-10/docs-2.json index 94e2bd8e342..7515e9877c3 100644 --- a/models/apis/dynamodb/2012-08-10/docs-2.json +++ b/models/apis/dynamodb/2012-08-10/docs-2.json @@ -41,7 +41,7 @@ "PutItem": "
Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values. You can return the item's attribute values in the same operation, using the ReturnValues parameter.
When you add an item, the primary key attributes are the only required attributes.
Empty String and Binary attribute values are allowed. Attribute values of type String and Binary must have a length greater than zero if the attribute is used as a key attribute for a table or index. Set type attributes cannot be empty.
Invalid requests with empty values are rejected with a ValidationException.
To prevent a new item from replacing an existing item, use a conditional expression that contains the attribute_not_exists function with the name of the attribute being used as the partition key for the table. Since every record must contain that attribute, the attribute_not_exists function will only succeed if no matching item exists.
For more information about PutItem, see Working with Items in the Amazon DynamoDB Developer Guide.
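To make the conditional put described above concrete, here is a minimal aws-sdk-go (v1) sketch; the table and attribute names are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// attribute_not_exists on the partition key makes this a pure insert:
	// the call fails with ConditionalCheckFailedException if the item exists.
	_, err := svc.PutItem(&dynamodb.PutItemInput{
		TableName: aws.String("Music"), // hypothetical table
		Item: map[string]*dynamodb.AttributeValue{
			"Artist":    {S: aws.String("No One You Know")},
			"SongTitle": {S: aws.String("Call Me Today")},
		},
		ConditionExpression: aws.String("attribute_not_exists(Artist)"),
	})
	if err != nil {
		fmt.Println("put failed:", err)
	}
}
```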
", "Query": "You must provide the name of the partition key attribute and a single value for that attribute. Query returns all items with that partition key value. Optionally, you can provide a sort key attribute and use a comparison operator to refine the search results.
Use the KeyConditionExpression parameter to provide a specific value for the partition key. The Query operation will return all of the items from the table or index with that partition key value. You can optionally narrow the scope of the Query operation by specifying a sort key value and a comparison operator in KeyConditionExpression. To further refine the Query results, you can optionally provide a FilterExpression. A FilterExpression determines which items within the results should be returned to you. All of the other results are discarded.
A Query operation always returns a result set. If no matching items are found, the result set will be empty. Queries that do not return results consume the minimum number of read capacity units for that type of read operation.
DynamoDB calculates the number of read capacity units consumed based on item size, not on the amount of data that is returned to an application. The number of capacity units consumed will be the same whether you request all of the attributes (the default behavior) or just some of them (using a projection expression). The number will also be the same whether or not you use a FilterExpression.
Query results are always sorted by the sort key value. If the data type of the sort key is Number, the results are returned in numeric order; otherwise, the results are returned in order of UTF-8 bytes. By default, the sort order is ascending. To reverse the order, set the ScanIndexForward parameter to false.
A single Query operation will read up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.
FilterExpression is applied after a Query finishes, but before the results are returned. A FilterExpression cannot contain partition key or sort key attributes. You need to specify those attributes in the KeyConditionExpression.
A Query operation can return an empty result set and a LastEvaluatedKey if all the items read for the page of results are filtered out.
You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set the ConsistentRead parameter to true and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead when querying a global secondary index.
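As a hedged illustration of the pagination behavior above, a QueryPages sketch in aws-sdk-go (v1); the table and key names are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	input := &dynamodb.QueryInput{
		TableName:              aws.String("Music"), // hypothetical table
		KeyConditionExpression: aws.String("Artist = :a"),
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":a": {S: aws.String("No One You Know")},
		},
		ScanIndexForward: aws.Bool(false), // descending sort-key order
	}
	// QueryPages follows LastEvaluatedKey until the result set is exhausted.
	err := svc.QueryPages(input, func(page *dynamodb.QueryOutput, lastPage bool) bool {
		fmt.Println("items in page:", len(page.Items))
		return true // false would stop pagination early
	})
	if err != nil {
		fmt.Println("query failed:", err)
	}
}
```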
", "RestoreTableFromBackup": "Creates a new table from an existing backup. Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account.
You can call RestoreTableFromBackup at a maximum rate of 10 times per second.
You must manually set up the following on the restored table:
Auto scaling policies
IAM policies
Amazon CloudWatch metrics and alarms
Tags
Stream settings
Time to Live (TTL) settings
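A minimal aws-sdk-go (v1) sketch of the call; the backup ARN and table name are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Auto scaling, IAM policies, CloudWatch alarms, tags, stream and TTL
	// settings are not carried over and must be re-created on the new table.
	out, err := svc.RestoreTableFromBackup(&dynamodb.RestoreTableFromBackupInput{
		TargetTableName: aws.String("Music-restored"), // hypothetical
		BackupArn:       aws.String("arn:aws:dynamodb:us-east-1:111122223333:table/Music/backup/01234567890123-abcdefgh"), // hypothetical
	})
	if err != nil {
		fmt.Println("restore failed:", err)
		return
	}
	fmt.Println("status:", aws.StringValue(out.TableDescription.TableStatus))
}
```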
", - "RestoreTableToPointInTime": "Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.
When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.
Along with data, the following are also included on the new restored table using point in time recovery:
Global secondary indexes (GSIs)
Local secondary indexes (LSIs)
Provisioned read and write capacity
Encryption settings
All these settings come from the current settings of the source table at the time of restore.
You must manually set up the following on the restored table:
Auto scaling policies
IAM policies
Amazon CloudWatch metrics and alarms
Tags
Stream settings
Time to Live (TTL) settings
Point in time recovery settings
", + "RestoreTableToPointInTime": "Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account.
When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.
Along with data, the following are also included on the new restored table using point in time recovery:
Global secondary indexes (GSIs)
Local secondary indexes (LSIs)
Provisioned read and write capacity
Encryption settings
All these settings come from the current settings of the source table at the time of restore.
You must manually set up the following on the restored table:
Auto scaling policies
IAM policies
Amazon CloudWatch metrics and alarms
Tags
Stream settings
Time to Live (TTL) settings
Point in time recovery settings
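For reference, a hedged aws-sdk-go (v1) sketch of a point-in-time restore; the table names are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Restore to the latest restorable time; pass RestoreDateTime instead
	// to pick a specific second within the 35-day window.
	_, err := svc.RestoreTableToPointInTime(&dynamodb.RestoreTableToPointInTimeInput{
		SourceTableName:         aws.String("Music"),           // hypothetical
		TargetTableName:         aws.String("Music-pitr-copy"), // hypothetical
		UseLatestRestorableTime: aws.Bool(true),
	})
	if err != nil {
		fmt.Println("restore failed:", err)
	}
}
```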
", "Scan": "The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression.
If the total size of scanned items exceeds the maximum dataset size limit of 1 MB, the scan completes and results are returned to the user. The LastEvaluatedKey value is also returned and the requestor can use the LastEvaluatedKey to continue the scan in a subsequent operation. Each scan response also includes the number of items that were scanned (ScannedCount) as part of the request. If using a FilterExpression, a scan result can result in no items meeting the criteria and the Count will result in zero. If you did not use a FilterExpression in the scan request, then Count is the same as ScannedCount.
Count and ScannedCount only return the count of items specific to a single scan request and, unless the table is less than 1 MB, do not represent the total number of items in the table.
A single Scan operation first reads up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then applies any filtering to the results if a FilterExpression is provided. If LastEvaluatedKey is present in the response, pagination is required to complete the full table scan. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.
Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide.
By default, a Scan uses eventually consistent reads when accessing the items in a table. Therefore, the results from an eventually consistent Scan may not include the latest item changes at the time the scan iterates through each item in the table. If you require a strongly consistent read of each item as the scan iterates through the items in the table, you can set the ConsistentRead parameter to true. Strong consistency only relates to the consistency of the read at the item level.
DynamoDB does not provide snapshot isolation for a scan operation when the ConsistentRead parameter is set to true. Thus, a DynamoDB scan operation does not guarantee that all reads in a scan see a consistent snapshot of the table when the scan operation was requested.
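The Segment/TotalSegments mechanism above lends itself to one goroutine per segment. A hedged aws-sdk-go (v1) sketch, with a hypothetical table name:

```go
package main

import (
	"fmt"
	"sync"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession())) // clients are safe for concurrent use

	const totalSegments = 4
	var wg sync.WaitGroup
	for seg := int64(0); seg < totalSegments; seg++ {
		wg.Add(1)
		go func(segment int64) {
			defer wg.Done()
			input := &dynamodb.ScanInput{
				TableName:     aws.String("Music"), // hypothetical table
				Segment:       aws.Int64(segment),
				TotalSegments: aws.Int64(totalSegments),
			}
			// ScanPages follows LastEvaluatedKey within this segment.
			_ = svc.ScanPages(input, func(page *dynamodb.ScanOutput, lastPage bool) bool {
				fmt.Printf("segment %d: %d items\n", segment, len(page.Items))
				return true
			})
		}(seg)
	}
	wg.Wait()
}
```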
", "TagResource": "Associate a set of tags with an Amazon DynamoDB resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking. You can call TagResource up to five times per second, per account.
For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.
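A minimal aws-sdk-go (v1) sketch; the resource ARN and tag are illustrative:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	_, err := svc.TagResource(&dynamodb.TagResourceInput{
		ResourceArn: aws.String("arn:aws:dynamodb:us-east-1:111122223333:table/Music"), // hypothetical
		Tags: []*dynamodb.Tag{
			{Key: aws.String("CostCenter"), Value: aws.String("analytics")}, // illustrative tag
		},
	})
	if err != nil {
		fmt.Println("tagging failed:", err)
	}
}
```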
", "TransactGetItems": " TransactGetItems
is a synchronous operation that atomically retrieves multiple items from one or more tables (but not from indexes) in a single account and Region. A TransactGetItems
call can contain up to 100 TransactGetItem
objects, each of which contains a Get
structure that specifies an item to retrieve from a table in the account and Region. A call to TransactGetItems
cannot retrieve items from tables in more than one Amazon Web Services account or Region. The aggregate size of the items in the transaction cannot exceed 4 MB.
DynamoDB rejects the entire TransactGetItems
request if any of the following is true:
A conflicting operation is in the process of updating an item to be read.
There is insufficient provisioned capacity for the transaction to be completed.
There is a user error, such as an invalid data format.
The aggregate size of the items in the transaction exceeded 4 MB.
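A hedged aws-sdk-go (v1) sketch of a single-item transactional read; names are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Each TransactGetItem wraps a Get; up to 100 per call, 4 MB aggregate.
	out, err := svc.TransactGetItems(&dynamodb.TransactGetItemsInput{
		TransactItems: []*dynamodb.TransactGetItem{
			{Get: &dynamodb.Get{
				TableName: aws.String("Music"), // hypothetical table
				Key: map[string]*dynamodb.AttributeValue{
					"Artist":    {S: aws.String("No One You Know")},
					"SongTitle": {S: aws.String("Call Me Today")},
				},
			}},
		},
	})
	if err != nil {
		fmt.Println("transaction failed:", err)
		return
	}
	fmt.Println("responses:", len(out.Responses))
}
```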
The settings for DynamoDB Streams on the table. These settings consist of:
StreamEnabled - Indicates whether DynamoDB Streams is to be enabled (true) or disabled (false).
StreamViewType - When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values for StreamViewType are:
KEYS_ONLY - Only the key attributes of the modified item are written to the stream.
NEW_IMAGE - The entire item, as it appears after it was modified, is written to the stream.
OLD_IMAGE - The entire item, as it appeared before it was modified, is written to the stream.
NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are written to the stream.
Stream settings on the table when the backup was created.
", "TableDescription$StreamSpecification": "The current DynamoDB Streams configuration for the table.
", - "UpdateTableInput$StreamSpecification": "Represents the DynamoDB Streams configuration for the table.
You receive a ResourceInUseException
if you try to enable a stream on a table that already has a stream, or if you try to disable a stream on a table that doesn't have a stream.
Represents the DynamoDB Streams configuration for the table.
You receive a ValidationException
if you try to enable a stream on a table that already has a stream, or if you try to disable a stream on a table that doesn't have a stream.
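To make the stream settings concrete, a minimal aws-sdk-go (v1) sketch of enabling a stream via UpdateTable; the table name is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Only valid for a table without an active stream; enabling a stream
	// on a table that already has one is rejected, as described above.
	_, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
		TableName: aws.String("Music"), // hypothetical table
		StreamSpecification: &dynamodb.StreamSpecification{
			StreamEnabled:  aws.Bool(true),
			StreamViewType: aws.String(dynamodb.StreamViewTypeNewAndOldImages),
		},
	})
	if err != nil {
		fmt.Println("update failed:", err)
	}
}
```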
Creates an action. An action is a lineage tracking entity that represents an action or activity. For example, a model deployment or an HPO job. Generally, an action involves at least one input or output artifact. For more information, see Amazon SageMaker ML Lineage Tracking.
", "CreateAlgorithm": "Create a machine learning algorithm that you can use in SageMaker and list in the Amazon Web Services Marketplace.
", "CreateApp": "Creates a running app for the specified UserProfile. This operation is automatically invoked by Amazon SageMaker upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may have multiple Apps active simultaneously.
", - "CreateAppImageConfig": "Creates a configuration for running a SageMaker image as a KernelGateway app. The configuration specifies the Amazon Elastic File System (EFS) storage volume on the image, and a list of the kernels in the image.
", + "CreateAppImageConfig": "Creates a configuration for running a SageMaker image as a KernelGateway app. The configuration specifies the Amazon Elastic File System storage volume on the image, and a list of the kernels in the image.
", "CreateArtifact": "Creates an artifact. An artifact is a lineage tracking entity that represents a URI addressable object or data. Some examples are the S3 URI of a dataset and the ECR registry path of an image. For more information, see Amazon SageMaker ML Lineage Tracking.
", "CreateAutoMLJob": "Creates an Autopilot job also referred to as Autopilot experiment or AutoML job.
We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility.
CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning).
Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.
You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.
", "CreateAutoMLJobV2": "Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2.
CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility.
CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text classification, and text generation (LLMs fine-tuning).
Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.
For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig.
You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.
", @@ -31,8 +31,8 @@ "CreateHub": "Create a hub.
Hub APIs are only callable through SageMaker Studio.
Defines the settings you will use for the human review workflow user interface. Reviewers will see a three-panel interface with an instruction area, the item to review, and an input area.
", "CreateHyperParameterTuningJob": "Starts a hyperparameter tuning job. A hyperparameter tuning job finds the best version of a model by running many training jobs on your dataset using the algorithm you choose and values for hyperparameters within ranges that you specify. It then chooses the hyperparameter values that result in a model that performs the best, as measured by an objective metric that you choose.
A hyperparameter tuning job automatically creates Amazon SageMaker experiments, trials, and trial components for each training job that it runs. You can view these entities in Amazon SageMaker Studio. For more information, see View Experiments, Trials, and Trial Components.
Do not include any security-sensitive information, including account access IDs, secrets, or tokens, in any hyperparameter field. If the use of security-sensitive credentials is detected, SageMaker will reject your training job request and return an exception error.
", - "CreateImage": "Creates a custom SageMaker image. A SageMaker image is a set of image versions. Each image version represents a container image stored in Amazon Elastic Container Registry (ECR). For more information, see Bring your own SageMaker image.
", - "CreateImageVersion": "Creates a version of the SageMaker image specified by ImageName. The version represents the Amazon Elastic Container Registry (ECR) container image specified by BaseImage.
", + "CreateImage": "Creates a custom SageMaker image. A SageMaker image is a set of image versions. Each image version represents a container image stored in Amazon ECR. For more information, see Bring your own SageMaker image.
", + "CreateImageVersion": "Creates a version of the SageMaker image specified by ImageName. The version represents the Amazon ECR container image specified by BaseImage.
Creates an inference component, which is a SageMaker hosting object that you can use to deploy a model to an endpoint. In the inference component settings, you specify the model, the endpoint, and how the model utilizes the resources that the endpoint hosts. You can optimize resource utilization by tailoring how the required CPU cores, accelerators, and memory are allocated. You can deploy multiple inference components to an endpoint, where each inference component contains one model and the resource utilization needs for that individual model. After you deploy an inference component, you can directly invoke the associated model when you use the InvokeEndpoint API action.
", "CreateInferenceExperiment": "Creates an inference experiment using the configurations specified in the request.
Use this API to setup and schedule an experiment to compare model variants on a Amazon SageMaker inference endpoint. For more information about inference experiments, see Shadow tests.
Amazon SageMaker begins your experiment at the scheduled time and routes traffic to your endpoint's model variants based on your specified configuration.
While the experiment is in progress or after it has concluded, you can view metrics that compare your model variants. For more information, see View, monitor, and edit shadow tests.
", "CreateInferenceRecommendationsJob": "Starts a recommendation job. You can create either an instance recommendation or load test job.
", @@ -47,19 +47,19 @@ "CreateModelQualityJobDefinition": "Creates a definition for a job that monitors model quality and drift. For information about model monitor, see Amazon SageMaker Model Monitor.
", "CreateMonitoringSchedule": "Creates a schedule that regularly starts Amazon SageMaker Processing Jobs to monitor the data captured for an Amazon SageMaker Endpoint.
", "CreateNotebookInstance": "Creates an SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance running on a Jupyter notebook.
In a CreateNotebookInstance
request, specify the type of ML compute instance that you want to run. SageMaker launches the instance, installs common libraries that you can use to explore datasets for model training, and attaches an ML storage volume to the notebook instance.
SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use SageMaker with a specific algorithm or with a machine learning framework.
After receiving the request, SageMaker does the following:
Creates a network interface in the SageMaker VPC.
(Option) If you specified SubnetId
, SageMaker creates a network interface in your own VPC, which is inferred from the subnet ID that you provide in the input. When creating this network interface, SageMaker attaches the security group that you specified in the request to the network interface that it creates in your VPC.
Launches an EC2 instance of the type specified in the request in the SageMaker VPC. If you specified SubnetId
of your VPC, SageMaker specifies both network interfaces when launching this instance. This enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it.
After creating the notebook instance, SageMaker returns its Amazon Resource Name (ARN). You can't change the name of a notebook instance after you create it.
After SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter notebooks. For example, you can write code to explore a dataset that you can use for model training, train a model, host models by creating SageMaker endpoints, and validate hosted models.
For more information, see How It Works.
", - "CreateNotebookInstanceLifecycleConfig": "Creates a lifecycle configuration that you can associate with a notebook instance. A lifecycle configuration is a collection of shell scripts that run when you create or start a notebook instance.
Each lifecycle configuration script has a limit of 16384 characters.
The value of the $PATH
environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin
.
View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances
in log stream [notebook-instance-name]/[LifecycleConfigHook]
.
Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.
For information about notebook instance lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.
", + "CreateNotebookInstanceLifecycleConfig": "Creates a lifecycle configuration that you can associate with a notebook instance. A lifecycle configuration is a collection of shell scripts that run when you create or start a notebook instance.
Each lifecycle configuration script has a limit of 16384 characters.
The value of the $PATH
environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin
.
View Amazon CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances
in log stream [notebook-instance-name]/[LifecycleConfigHook]
.
Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.
For information about notebook instance lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.
", "CreatePipeline": "Creates a pipeline using a JSON pipeline definition.
", - "CreatePresignedDomainUrl": "Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System (EFS) volume. This operation can only be called when the authentication mode equals IAM.
The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app.
You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint .
The URL that you get from a call to CreatePresignedDomainUrl
has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds
. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page.
Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM.
The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app.
You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint .
The URL that you get from a call to CreatePresignedDomainUrl
has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds
. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page.
Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the SageMaker console, when you choose Open
next to a notebook instance, SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.
The IAM role or user used to call this API defines the permissions to access the notebook instance. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance.
You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress
condition operator and the aws:SourceIP
condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.
The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the Amazon Web Services console sign-in page.
Creates a processing job.
", "CreateProject": "Creates a machine learning (ML) project that can contain one or more templates that set up an ML pipeline from training to deploying an approved model.
", - "CreateSpace": "Creates a space used for real time collaboration in a Domain.
", + "CreateSpace": "Creates a space used for real time collaboration in a domain.
", "CreateStudioLifecycleConfig": "Creates a new Amazon SageMaker Studio Lifecycle Configuration.
", "CreateTrainingJob": "Starts a model training job. After training completes, SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.
If you choose to host your model using SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a machine learning service other than SageMaker, provided that you know how to use them for inference.
In the request body, you provide the following:
AlgorithmSpecification
- Identifies the training algorithm to use.
HyperParameters
- Specify these algorithm-specific parameters to enable the estimation of model parameters during training. Hyperparameters can be tuned to optimize this learning process. For a list of hyperparameters for each training algorithm provided by SageMaker, see Algorithms.
Do not include any security-sensitive information including account access IDs, secrets or tokens in any hyperparameter field. If the use of security-sensitive credentials are detected, SageMaker will reject your training job request and return an exception error.
InputDataConfig
- Describes the input required by the training job and the Amazon S3, EFS, or FSx location where it is stored.
OutputDataConfig
- Identifies the Amazon S3 bucket where you want SageMaker to save the results of model training.
ResourceConfig
- Identifies the resources, ML compute instances, and ML storage volumes to deploy for model training. In distributed training, you specify more than one instance.
EnableManagedSpotTraining
- Optimize the cost of training machine learning models by up to 80% by using Amazon EC2 Spot instances. For more information, see Managed Spot Training.
RoleArn
- The Amazon Resource Name (ARN) that SageMaker assumes to perform tasks on your behalf during model training. You must grant this role the necessary permissions so that SageMaker can successfully complete model training.
StoppingCondition
- To help cap training costs, use MaxRuntimeInSeconds
to set a time limit for training. Use MaxWaitTimeInSeconds
to specify how long a managed spot training job has to complete.
Environment
- The environment variables to set in the Docker container.
RetryStrategy
- The number of times to retry the job when the job fails due to an InternalServerError
.
For more information about SageMaker, see How It Works.
", "CreateTransformJob": "Starts a transform job. A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify.
To perform batch transformations, you create a transform job and use the data that you have readily available.
In the request body, you provide the following:
TransformJobName
- Identifies the transform job. The name must be unique within an Amazon Web Services Region in an Amazon Web Services account.
ModelName
- Identifies the model to use. ModelName
must be the name of an existing Amazon SageMaker model in the same Amazon Web Services Region and Amazon Web Services account. For information on creating a model, see CreateModel.
TransformInput
- Describes the dataset to be transformed and the Amazon S3 location where it is stored.
TransformOutput
- Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the transform job.
TransformResources
- Identifies the ML compute instances for the transform job.
For more information about how batch transformation works, see Batch Transform.
", "CreateTrial": "Creates an SageMaker trial. A trial is a set of steps called trial components that produce a machine learning model. A trial is part of a single SageMaker experiment.
When you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the Amazon Web Services SDK for Python (Boto), you must use the logging APIs provided by the SDK.
You can add tags to a trial and then use the Search API to search for the tags.
To get a list of all your trials, call the ListTrials API. To view a trial's properties, call the DescribeTrial API. To create a trial component, call the CreateTrialComponent API.
", "CreateTrialComponent": "Creates a trial component, which is a stage of a machine learning trial. A trial is composed of one or more trial components. A trial component can be used in multiple trials.
Trial components include pre-processing jobs, training jobs, and batch transform jobs.
When you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the Amazon Web Services SDK for Python (Boto), you must use the logging APIs provided by the SDK.
You can add tags to a trial component and then use the Search API to search for the tags.
", - "CreateUserProfile": "Creates a user profile. A user profile represents a single user within a domain, and is the main way to reference a \"person\" for the purposes of sharing, reporting, and other user-oriented features. This entity is created when a user onboards to a domain. If an administrator invites a person by email or imports them from IAM Identity Center, a user profile is automatically created. A user profile is the primary holder of settings for an individual user and has a reference to the user's private Amazon Elastic File System (EFS) home directory.
", + "CreateUserProfile": "Creates a user profile. A user profile represents a single user within a domain, and is the main way to reference a \"person\" for the purposes of sharing, reporting, and other user-oriented features. This entity is created when a user onboards to a domain. If an administrator invites a person by email or imports them from IAM Identity Center, a user profile is automatically created. A user profile is the primary holder of settings for an individual user and has a reference to the user's private Amazon Elastic File System home directory.
", "CreateWorkforce": "Use this operation to create a workforce. This operation will return an error if a workforce already exists in the Amazon Web Services Region that you specify. You can only create one workforce in each Amazon Web Services Region per Amazon Web Services account.
If you want to create a new workforce in an Amazon Web Services Region where a workforce already exists, use the DeleteWorkforce API operation to delete the existing workforce and then use CreateWorkforce
to create a new workforce.
To create a private workforce using Amazon Cognito, you must specify a Cognito user pool in CognitoConfig
. You can also create an Amazon Cognito workforce using the Amazon SageMaker console. For more information, see Create a Private Workforce (Amazon Cognito).
To create a private workforce using your own OIDC Identity Provider (IdP), specify your IdP configuration in OidcConfig
. Your OIDC IdP must support groups because groups are used by Ground Truth and Amazon A2I to create work teams. For more information, see Create a Private Workforce (OIDC IdP).
Creates a new work team for labeling your data. A work team is defined by one or more Amazon Cognito user pools. You must first create the user pools before you can create a work team.
You cannot create more than 25 work teams in an account and region.
", "DeleteAction": "Deletes an action.
", @@ -590,10 +590,10 @@ "AppImageConfigArn": { "base": null, "refs": { - "AppImageConfigDetails$AppImageConfigArn": "The Amazon Resource Name (ARN) of the AppImageConfig.
", - "CreateAppImageConfigResponse$AppImageConfigArn": "The Amazon Resource Name (ARN) of the AppImageConfig.
", - "DescribeAppImageConfigResponse$AppImageConfigArn": "The Amazon Resource Name (ARN) of the AppImageConfig.
", - "UpdateAppImageConfigResponse$AppImageConfigArn": "The Amazon Resource Name (ARN) for the AppImageConfig.
" + "AppImageConfigDetails$AppImageConfigArn": "The ARN of the AppImageConfig.
", + "CreateAppImageConfigResponse$AppImageConfigArn": "The ARN of the AppImageConfig.
", + "DescribeAppImageConfigResponse$AppImageConfigArn": "The ARN of the AppImageConfig.
", + "UpdateAppImageConfigResponse$AppImageConfigArn": "The ARN for the AppImageConfig.
" } }, "AppImageConfigDetails": { @@ -3422,11 +3422,11 @@ } }, "DefaultSpaceSettings": { - "base": "A collection of settings that apply to spaces created in the Domain.
", + "base": "A collection of settings that apply to spaces created in the domain.
", "refs": { "CreateDomainRequest$DefaultSpaceSettings": "The default settings used to create a space.
", "DescribeDomainResponse$DefaultSpaceSettings": "The default settings used to create a space.
", - "UpdateDomainRequest$DefaultSpaceSettings": "The default settings used to create a space within the Domain.
" + "UpdateDomainRequest$DefaultSpaceSettings": "The default settings used to create a space within the domain.
" } }, "DefaultSpaceStorageSettings": { @@ -4763,27 +4763,27 @@ "AppDetails$DomainId": "The domain ID.
", "CreateAppRequest$DomainId": "The domain ID.
", "CreatePresignedDomainUrlRequest$DomainId": "The domain ID.
", - "CreateSpaceRequest$DomainId": "The ID of the associated Domain.
", + "CreateSpaceRequest$DomainId": "The ID of the associated domain.
", "CreateUserProfileRequest$DomainId": "The ID of the associated Domain.
", "DeleteAppRequest$DomainId": "The domain ID.
", "DeleteDomainRequest$DomainId": "The domain ID.
", - "DeleteSpaceRequest$DomainId": "The ID of the associated Domain.
", + "DeleteSpaceRequest$DomainId": "The ID of the associated domain.
", "DeleteUserProfileRequest$DomainId": "The domain ID.
", "DescribeAppRequest$DomainId": "The domain ID.
", "DescribeAppResponse$DomainId": "The domain ID.
", "DescribeDomainRequest$DomainId": "The domain ID.
", "DescribeDomainResponse$DomainId": "The domain ID.
", - "DescribeSpaceRequest$DomainId": "The ID of the associated Domain.
", - "DescribeSpaceResponse$DomainId": "The ID of the associated Domain.
", + "DescribeSpaceRequest$DomainId": "The ID of the associated domain.
", + "DescribeSpaceResponse$DomainId": "The ID of the associated domain.
", "DescribeUserProfileRequest$DomainId": "The domain ID.
", "DescribeUserProfileResponse$DomainId": "The ID of the domain that contains the profile.
", "DomainDetails$DomainId": "The domain ID.
", "ListAppsRequest$DomainIdEquals": "A parameter to search for the domain ID.
", - "ListSpacesRequest$DomainIdEquals": "A parameter to search for the Domain ID.
", + "ListSpacesRequest$DomainIdEquals": "A parameter to search for the domain ID.
", "ListUserProfilesRequest$DomainIdEquals": "A parameter by which to filter the results.
", - "SpaceDetails$DomainId": "The ID of the associated Domain.
", + "SpaceDetails$DomainId": "The ID of the associated domain.
", "UpdateDomainRequest$DomainId": "The ID of the domain to be updated.
", - "UpdateSpaceRequest$DomainId": "The ID of the associated Domain.
", + "UpdateSpaceRequest$DomainId": "The ID of the associated domain.
", "UpdateUserProfileRequest$DomainId": "The domain ID.
", "UserProfileDetails$DomainId": "The domain ID.
" } @@ -5078,8 +5078,8 @@ "EfsUid": { "base": null, "refs": { - "DescribeSpaceResponse$HomeEfsFileSystemUid": "The ID of the space's profile in the Amazon Elastic File System volume.
", - "DescribeUserProfileResponse$HomeEfsFileSystemUid": "The ID of the user's profile in the Amazon Elastic File System (EFS) volume.
" + "DescribeSpaceResponse$HomeEfsFileSystemUid": "The ID of the space's profile in the Amazon EFS volume.
", + "DescribeUserProfileResponse$HomeEfsFileSystemUid": "The ID of the user's profile in the Amazon Elastic File System volume.
" } }, "EnableCapture": { @@ -5983,10 +5983,10 @@ } }, "FileSystemConfig": { - "base": "The Amazon Elastic File System (EFS) storage configuration for a SageMaker image.
", + "base": "The Amazon Elastic File System storage configuration for a SageMaker image.
", "refs": { "JupyterLabAppImageConfig$FileSystemConfig": null, - "KernelGatewayImageConfig$FileSystemConfig": "The Amazon Elastic File System (EFS) storage configuration for a SageMaker image.
" + "KernelGatewayImageConfig$FileSystemConfig": "The Amazon Elastic File System storage configuration for a SageMaker image.
" } }, "FileSystemDataSource": { @@ -6222,6 +6222,12 @@ "TabularJobConfig$GenerateCandidateDefinitionsOnly": "Generates possible candidates without training the models. A model candidate is a combination of data preprocessors, algorithms, and algorithm parameter settings.
" } }, + "GenerativeAiSettings": { + "base": "The generative AI settings for the SageMaker Canvas application.
Configure these settings for Canvas users starting chats with generative AI foundation models. For more information, see Use generative AI with foundation models.
", + "refs": { + "CanvasAppSettings$GenerativeAiSettings": "The generative AI settings for the SageMaker Canvas application.
" + } + }, "GetDeviceFleetReportRequest": { "base": null, "refs": { @@ -6926,7 +6932,7 @@ "HyperbandStrategyMaxResource": { "base": null, "refs": { - "HyperbandStrategyConfig$MaxResource": "The maximum number of resources (such as epochs) that can be used by a training job launched by a hyperparameter tuning job. Once a job reaches the MaxResource
value, it is stopped. If a value for MaxResource
is not provided, and Hyperband
is selected as the hyperparameter tuning strategy, HyperbandTrainingJ
attempts to infer MaxResource
from the following keys (if present) in StaticsHyperParameters:
epochs
numepochs
n-epochs
n_epochs
num_epochs
If HyperbandStrategyConfig
is unable to infer a value for MaxResource
, it generates a validation error. The maximum value is 20,000 epochs. All metrics that correspond to an objective metric are used to derive early stopping decisions. For distributive training jobs, ensure that duplicate metrics are not printed in the logs across the individual nodes in a training job. If multiple nodes are publishing duplicate or incorrect metrics, training jobs may make an incorrect stopping decision and stop the job prematurely.
The maximum number of resources (such as epochs) that can be used by a training job launched by a hyperparameter tuning job. Once a job reaches the MaxResource
value, it is stopped. If a value for MaxResource
is not provided, and Hyperband
is selected as the hyperparameter tuning strategy, HyperbandTraining
attempts to infer MaxResource
from the following keys (if present) in StaticsHyperParameters:
epochs
numepochs
n-epochs
n_epochs
num_epochs
If HyperbandStrategyConfig
is unable to infer a value for MaxResource
, it generates a validation error. The maximum value is 20,000 epochs. All metrics that correspond to an objective metric are used to derive early stopping decisions. For distributed training jobs, ensure that duplicate metrics are not printed in the logs across the individual nodes in a training job. If multiple nodes are publishing duplicate or incorrect metrics, training jobs may make an incorrect stopping decision and stop the job prematurely.
The registry path of the container image to use as the starting point for this version. The path is an Amazon Elastic Container Registry (ECR) URI in the following format:
<acct-id>.dkr.ecr.<region>.amazonaws.com/<repo-name[:tag] or [@digest]>
The registry path of the container image to use as the starting point for this version. The path is an Amazon ECR URI in the following format:
<acct-id>.dkr.ecr.<region>.amazonaws.com/<repo-name[:tag] or [@digest]>
The registry path of the container image on which this image version is based.
" } }, @@ -7798,7 +7804,7 @@ "refs": { "AsyncInferenceOutputConfig$KmsKeyId": "The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that SageMaker uses to encrypt the asynchronous inference output in Amazon S3.
", "AthenaDatasetDefinition$KmsKeyId": "The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data generated from an Athena query execution.
", - "AutoMLOutputDataConfig$KmsKeyId": "The Key Management Service (KMS) encryption key ID.
", + "AutoMLOutputDataConfig$KmsKeyId": "The Key Management Service encryption key ID.
", "AutoMLSecurityConfig$VolumeKmsKeyId": "The key used to encrypt stored data.
", "BatchDataCaptureConfig$KmsKeyId": "The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the batch transform job.
The KmsKeyId can be any of the following formats:
Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
Alias name: alias/ExampleAlias
Alias name ARN: arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias
Use KmsKeyId
.
Contains the notebook instance lifecycle configuration script.
Each lifecycle configuration script has a limit of 16384 characters.
The value of the $PATH
environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin
.
View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances
in log stream [notebook-instance-name]/[LifecycleConfigHook]
.
Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.
For information about notebook instance lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.
", + "base": "Contains the notebook instance lifecycle configuration script.
Each lifecycle configuration script has a limit of 16384 characters.
The value of the $PATH
environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin
.
View Amazon CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances
in log stream [notebook-instance-name]/[LifecycleConfigHook]
.
Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.
For information about notebook instance lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.
", "refs": { "NotebookInstanceLifecycleConfigList$member": null } @@ -11617,11 +11623,11 @@ "ProblemType": { "base": null, "refs": { - "CreateAutoMLJobRequest$ProblemType": "Defines the type of supervised learning problem available for the candidates. For more information, see Amazon SageMaker Autopilot problem types.
", + "CreateAutoMLJobRequest$ProblemType": "Defines the type of supervised learning problem available for the candidates. For more information, see SageMaker Autopilot problem types.
", "DescribeAutoMLJobResponse$ProblemType": "Returns the job's problem type.
", "ResolvedAttributes$ProblemType": "The problem type.
", - "TabularJobConfig$ProblemType": "The type of supervised learning problem available for the model candidates of the AutoML job V2. For more information, see Amazon SageMaker Autopilot problem types.
You must either specify the type of supervised learning problem in ProblemType
and provide the AutoMLJobObjective metric, or none at all.
The type of supervised learning problem available for the model candidates of the AutoML job V2 (Binary Classification, Multiclass Classification, Regression). For more information, see Amazon SageMaker Autopilot problem types.
" + "TabularJobConfig$ProblemType": "The type of supervised learning problem available for the model candidates of the AutoML job V2. For more information, see SageMaker Autopilot problem types.
You must either specify the type of supervised learning problem in ProblemType
and provide the AutoMLJobObjective metric, or none at all.
The type of supervised learning problem available for the model candidates of the AutoML job V2 (Binary Classification, Multiclass Classification, Regression). For more information, see SageMaker Autopilot problem types.
" } }, "ProcessingClusterConfig": { @@ -12811,7 +12817,7 @@ "DescribeAppResponse$ResourceSpec": "The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.
", "JupyterLabAppSettings$DefaultResourceSpec": null, "JupyterServerAppSettings$DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app. If you use the LifecycleConfigArns
parameter, then this parameter is also required.
The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.
The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the Amazon Web Services Command Line Interface or Amazon Web Services CloudFormation and the instance type parameter value is not passed.
The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.
The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the CLI or CloudFormation and the instance type parameter value is not passed.
The retention policy for data stored on an Amazon Elastic File System (EFS) volume.
", + "base": "The retention policy for data stored on an Amazon Elastic File System volume.
", "refs": { "DeleteDomainRequest$RetentionPolicy": "The retention policy for this domain, which specifies whether resources will be retained after the Domain is deleted. By default, all resources are retained (not automatically deleted).
" } @@ -12849,7 +12855,7 @@ "RetentionType": { "base": null, "refs": { - "RetentionPolicy$HomeEfsFileSystem": "The default is Retain
, which specifies to keep the data stored on the EFS volume.
Specify Delete
to delete the data stored on the EFS volume.
The default is Retain
, which specifies to keep the data stored on the Amazon EFS volume.
Specify Delete
to delete the data stored on the Amazon EFS volume.
The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.
", "CreateTrainingJobRequest$RoleArn": "The Amazon Resource Name (ARN) of an IAM role that SageMaker can assume to perform tasks on your behalf.
During model training, SageMaker needs your permission to read input data from an S3 bucket, download a Docker image that contains training code, write model artifacts to an S3 bucket, write logs to Amazon CloudWatch Logs, and publish metrics to Amazon CloudWatch. You grant permissions for all of these tasks to an IAM role. For more information, see SageMaker Roles.
To be able to pass this role to SageMaker, the caller of this API must have the iam:PassRole
permission.
The ARN of the execution role for the space.
", - "DescribeAutoMLJobResponse$RoleArn": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that has read permission to the input data location and write permission to the output data location in Amazon S3.
", - "DescribeAutoMLJobV2Response$RoleArn": "The ARN of the Identity and Access Management role that has read permission to the input data location and write permission to the output data location in Amazon S3.
", + "DescribeAutoMLJobResponse$RoleArn": "The ARN of the IAM role that has read permission to the input data location and write permission to the output data location in Amazon S3.
", + "DescribeAutoMLJobV2Response$RoleArn": "The ARN of the IAM role that has read permission to the input data location and write permission to the output data location in Amazon S3.
", "DescribeCompilationJobResponse$RoleArn": "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker assumes to perform the model compilation job.
", "DescribeDataQualityJobDefinitionResponse$RoleArn": "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.
", "DescribeDeviceFleetResponse$RoleArn": "The Amazon Resource Name (ARN) that has access to Amazon Web Services Internet of Things (IoT).
", @@ -12921,6 +12927,7 @@ "DescribeProcessingJobResponse$RoleArn": "The Amazon Resource Name (ARN) of an IAM role that Amazon SageMaker can assume to perform tasks on your behalf.
", "DescribeTrainingJobResponse$RoleArn": "The Amazon Web Services Identity and Access Management (IAM) role configured for the training job.
", "FeatureGroup$RoleArn": "The Amazon Resource Name (ARN) of the IAM execution role used to create the feature group.
", + "GenerativeAiSettings$AmazonBedrockRoleArn": "The ARN of an Amazon Web Services IAM role that allows fine-tuning of large language models (LLMs) in Amazon Bedrock. The IAM role should have Amazon S3 read and write permissions, as well as a trust relationship that establishes bedrock.amazonaws.com
as a service principal.
The Amazon Resource Name (ARN) of the IAM role associated with the training jobs that the tuning job launches.
", "InferenceExperimentSummary$RoleArn": "The ARN of the IAM role that Amazon SageMaker can assume to access model artifacts and container images, and manage Amazon SageMaker Inference endpoints for model deployment.
", "InferenceRecommendationsJob$RoleArn": "The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf.
", @@ -13267,7 +13274,7 @@ "base": null, "refs": { "CreateNotebookInstanceInput$SecurityGroupIds": "The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet.
", - "DefaultSpaceSettings$SecurityGroups": "The security group IDs for the Amazon Virtual Private Cloud that the space uses for communication.
", + "DefaultSpaceSettings$SecurityGroups": "The security group IDs for the Amazon VPC that the space uses for communication.
", "DescribeNotebookInstanceOutput$SecurityGroups": "The IDs of the VPC security groups.
", "UserSettings$SecurityGroups": "The security groups for the Amazon Virtual Private Cloud (VPC) that the domain uses for communication.
Optional when the CreateDomain.AppNetworkAccessType
parameter is set to PublicInternetOnly
.
Required when the CreateDomain.AppNetworkAccessType
parameter is set to VpcOnly
, unless specified as part of the DefaultUserSettings
for the domain.
Amazon SageMaker adds a security group to allow NFS traffic from Amazon SageMaker Studio. Therefore, the number of security groups that you can specify is one less than the maximum number shown.
" } @@ -16316,7 +16323,7 @@ "VisibilityConditionsKey": { "base": null, "refs": { - "VisibilityConditions$Key": "The key for that specifies the tag that you're using to filter the search results. The key must start with Tags.
.
The key that specifies the tag that you're using to filter the search results. It must be in the following format: Tags.<key>/EqualsIfExists
.