diff --git a/CHANGELOG.md b/CHANGELOG.md
index a659a106005..dd6f94fc908 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,13 @@
+Release v1.42.3 (2021-11-11)
+===
+
+### Service Client Updates
+* `service/dynamodb`: Updates service API, documentation, waiters, paginators, and examples
+ * Updated Help section for "dynamodb update-contributor-insights" API
+* `service/ec2`: Updates service API and documentation
+ * This release provides an additional route target for the VPC route table.
+* `service/translate`: Updates service API and documentation
+
Release v1.42.2 (2021-11-10)
===
diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go
index bc91d275318..ea12126a711 100644
--- a/aws/endpoints/defaults.go
+++ b/aws/endpoints/defaults.go
@@ -19383,6 +19383,12 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fips.transcribe.ca-central-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -19398,6 +19404,15 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "fips.transcribe.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -24429,6 +24444,14 @@ var awsusgovPartition = partition{
},
},
"identitystore": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{},
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "identitystore.{region}.{dnsSuffix}",
+ },
+ },
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-gov-west-1",
@@ -25238,6 +25261,14 @@ var awsusgovPartition = partition{
},
},
"rds": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{},
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rds.{region}.{dnsSuffix}",
+ },
+ },
Endpoints: serviceEndpoints{
endpointKey{
Region: "rds.us-gov-east-1",
@@ -25492,6 +25523,14 @@ var awsusgovPartition = partition{
},
},
"runtime.sagemaker": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{},
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "runtime.sagemaker.{region}.{dnsSuffix}",
+ },
+ },
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-west-1",
diff --git a/aws/version.go b/aws/version.go
index 086452c757c..27a961c1a9c 100644
--- a/aws/version.go
+++ b/aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
-const SDKVersion = "1.42.2"
+const SDKVersion = "1.42.3"
diff --git a/models/apis/dynamodb/2012-08-10/docs-2.json b/models/apis/dynamodb/2012-08-10/docs-2.json
index 10d69f241aa..ef6e3f5ce5f 100644
--- a/models/apis/dynamodb/2012-08-10/docs-2.json
+++ b/models/apis/dynamodb/2012-08-10/docs-2.json
@@ -1,13 +1,13 @@
{
"version": "2.0",
- "service": "
Amazon DynamoDB is a fully managed NoSQL database service that provides fast and predictable performance with seamless scalability. DynamoDB lets you offload the administrative burdens of operating and scaling a distributed database, so that you don't have to worry about hardware provisioning, setup and configuration, replication, software patching, or cluster scaling.
With DynamoDB, you can create database tables that can store and retrieve any amount of data, and serve any level of request traffic. You can scale up or scale down your tables' throughput capacity without downtime or performance degradation, and use the AWS Management Console to monitor resource utilization and performance metrics.
DynamoDB automatically spreads the data and traffic for your tables over a sufficient number of servers to handle your throughput and storage requirements, while maintaining consistent and fast performance. All of your data is stored on solid state disks (SSDs) and automatically replicated across multiple Availability Zones in an AWS region, providing built-in high availability and data durability.
", + "service": "Amazon DynamoDB is a fully managed NoSQL database service that provides fast and predictable performance with seamless scalability. DynamoDB lets you offload the administrative burdens of operating and scaling a distributed database, so that you don't have to worry about hardware provisioning, setup and configuration, replication, software patching, or cluster scaling.
With DynamoDB, you can create database tables that can store and retrieve any amount of data, and serve any level of request traffic. You can scale up or scale down your tables' throughput capacity without downtime or performance degradation, and use the Amazon Web Services Management Console to monitor resource utilization and performance metrics.
DynamoDB automatically spreads the data and traffic for your tables over a sufficient number of servers to handle your throughput and storage requirements, while maintaining consistent and fast performance. All of your data is stored on solid state disks (SSDs) and automatically replicated across multiple Availability Zones in an Amazon Web Services Region, providing built-in high availability and data durability.
", "operations": { - "BatchExecuteStatement": "This operation allows you to perform batch reads and writes on data stored in DynamoDB, using PartiQL.
", + "BatchExecuteStatement": "This operation allows you to perform batch reads or writes on data stored in DynamoDB, using PartiQL.
The entire batch must consist of either read statements or write statements, you cannot mix both in one batch.
"BatchGetItem": "The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key. A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem returns a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get. If you request more than 100 items, BatchGetItem returns a ValidationException with the message \"Too many items requested for the BatchGetItem call.\" For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one dataset. If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem returns a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys. If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed. For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide. By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables. In order to minimize response latency, BatchGetItem retrieves items in parallel. When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the ProjectionExpression parameter. If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Working with Tables in the Amazon DynamoDB Developer Guide.",
", "BatchWriteItem": "The BatchWriteItem
operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem
can write up to 16 MB of data, which can comprise as many as 25 put or delete requests. Individual items to be written can be as large as 400 KB.
BatchWriteItem
cannot update items. To update items, use the UpdateItem
action.
The individual PutItem
and DeleteItem
operations specified in BatchWriteItem
are atomic; however BatchWriteItem
as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems
response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem
in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem
request with those unprocessed items until all items have been processed.
If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem
returns a ProvisionedThroughputExceededException
.
If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed.
For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide.
With BatchWriteItem
, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem
does not behave in the same way as individual PutItem
and DeleteItem
calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem
does not return deleted items in the response.
If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem
performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application.
Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.
If one or more of the following is true, DynamoDB rejects the entire batch write operation:
One or more tables specified in the BatchWriteItem
request does not exist.
Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema.
You try to perform multiple operations on the same item in the same BatchWriteItem
request. For example, you cannot put and delete the same item in the same BatchWriteItem
request.
Your request contains at least two items with identical hash and range keys (which essentially is two put operations).
There are more than 25 requests in the batch.
Any individual item in a batch exceeds 400 KB.
The total request size exceeds 16 MB.
"CreateBackup": "Creates a backup for an existing table. Each time you create an on-demand backup, the entire table data is backed up. There is no limit to the number of on-demand backups that can be taken. When you create an on-demand backup, a time marker of the request is cataloged, and the backup is created asynchronously, by applying all changes until the time of the request to the last full table snapshot. Backup requests are processed instantaneously and become available for restore within minutes. You can call CreateBackup at a maximum rate of 50 times per second. All backups in DynamoDB work without consuming any provisioned throughput on the table. If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed to contain all data committed to the table up to 14:24:00, and data committed after 14:26:00 will not be. The backup might contain data modifications made between 14:24:00 and 14:26:00. On-demand backup does not support causal consistency. Along with data, the following are also included on the backups: Global secondary indexes (GSIs), Local secondary indexes (LSIs), Streams, Provisioned read and write capacity.",
"CreateGlobalTable": "Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided Regions. This operation only applies to Version 2017.11.29 of global tables. If you want to add a new replica table to a global table, each of the following conditions must be true: The table must have the same primary key as all of the other replicas. The table must have the same name as all of the other replicas. The table must have DynamoDB Streams enabled, with the stream containing both the new and the old images of the item. None of the replica tables in the global table can contain any data. If global secondary indexes are specified, then the following conditions must also be met: The global secondary indexes must have the same name. The global secondary indexes must have the same hash key and sort key (if present). If local secondary indexes are specified, then the following conditions must also be met: The local secondary indexes must have the same name. The local secondary indexes must have the same hash key and sort key (if present). Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables replicas and indexes. If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity units to your replica tables. You should also provision equal replicated write capacity units to matching secondary indexes across your global table.",
- "CreateTable": "The CreateTable operation adds a new table to your account. In an AWS account, table names must be unique within each Region. That is, you can have two tables with same name if you create the tables in different Regions. CreateTable is an asynchronous operation. Upon receiving a CreateTable request, DynamoDB immediately returns a response with a TableStatus of CREATING. After the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform read and write operations only on an ACTIVE table. You can optionally define secondary indexes on the new table, as part of the CreateTable operation. If you want to create multiple tables with secondary indexes on them, you must create the tables sequentially. Only one table with secondary indexes can be in the CREATING state at any given time. You can use the DescribeTable action to check the table status.",
+ "CreateTable": "The CreateTable operation adds a new table to your account. In an Amazon Web Services account, table names must be unique within each Region. That is, you can have two tables with same name if you create the tables in different Regions. CreateTable is an asynchronous operation. Upon receiving a CreateTable request, DynamoDB immediately returns a response with a TableStatus of CREATING. After the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform read and write operations only on an ACTIVE table. You can optionally define secondary indexes on the new table, as part of the CreateTable operation. If you want to create multiple tables with secondary indexes on them, you must create the tables sequentially. Only one table with secondary indexes can be in the CREATING state at any given time. You can use the DescribeTable action to check the table status.",
"DeleteBackup": "Deletes an existing backup of a table. You can call DeleteBackup at a maximum rate of 10 times per second.",
"DeleteItem": "Deletes a single item in a table by primary key. You can perform a conditional delete operation that deletes the item if it exists, or if it has an expected attribute value. In addition to deleting an item, you can also return the item's attribute values in the same operation, using the ReturnValues parameter. Unless you specify conditions, the DeleteItem is an idempotent operation; running it multiple times on the same item or attribute does not result in an error response. Conditional deletes are useful for deleting items only if specific conditions are met. If those conditions are met, DynamoDB performs the delete. Otherwise, the item is not deleted.",
", "DeleteTable": "The DeleteTable
operation deletes a table and all of its items. After a DeleteTable
request, the specified table is in the DELETING
state until DynamoDB completes the deletion. If the table is in the ACTIVE
state, you can delete it. If a table is in CREATING
or UPDATING
states, then DynamoDB returns a ResourceInUseException
. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException
. If table is already in the DELETING
state, no error is returned.
DynamoDB might continue to accept data read and write operations, such as GetItem
and PutItem
, on a table in the DELETING
state until the table deletion is complete.
When you delete a table, any indexes on that table are also deleted.
If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED
state, and the stream is automatically deleted after 24 hours.
Use the DescribeTable
action to check the status of the table.
@@ ... @@
"DescribeGlobalTable": "Returns information about the specified global table. This operation only applies to Version 2017.11.29 of global tables. If you are using global tables Version 2019.11.21 you can use DescribeTable instead.",
"DescribeGlobalTableSettings": "Describes Region-specific settings for a global table. This operation only applies to Version 2017.11.29 of global tables.",
"DescribeKinesisStreamingDestination": "Returns information about the status of Kinesis streaming.",
", - "DescribeLimits": "Returns the current provisioned-capacity quotas for your AWS account in a Region, both for the Region as a whole and for any one DynamoDB table that you create there.
When you establish an AWS account, the account has initial quotas on the maximum read capacity units and write capacity units that you can provision across all of your DynamoDB tables in a given Region. Also, there are per-table quotas that apply when you create a table there. For more information, see Service, Account, and Table Quotas page in the Amazon DynamoDB Developer Guide.
Although you can increase these quotas by filing a case at AWS Support Center, obtaining the increase is not instantaneous. The DescribeLimits
action lets you write code to compare the capacity you are currently using to those quotas imposed by your account so that you have enough time to apply for an increase before you hit a quota.
For example, you could use one of the AWS SDKs to do the following:
Call DescribeLimits
for a particular Region to obtain your current account quotas on provisioned capacity there.
Create a variable to hold the aggregate read capacity units provisioned for all your tables in that Region, and one to hold the aggregate write capacity units. Zero them both.
Call ListTables
to obtain a list of all your DynamoDB tables.
For each table name listed by ListTables
, do the following:
Call DescribeTable
with the table name.
Use the data returned by DescribeTable
to add the read capacity units and write capacity units provisioned for the table itself to your variables.
If the table has one or more global secondary indexes (GSIs), loop over these GSIs and add their provisioned capacity values to your variables as well.
Report the account quotas for that Region returned by DescribeLimits
, along with the total current provisioned capacity levels you have calculated.
This will let you see whether you are getting close to your account-level quotas.
The per-table quotas apply only when you are creating a new table. They restrict the sum of the provisioned capacity of the new table itself and all its global secondary indexes.
For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned capacity extremely rapidly, but the only quota that applies is that the aggregate provisioned capacity over all your tables and GSIs cannot exceed either of the per-account quotas.
DescribeLimits
should only be called periodically. You can expect throttling errors if you call it more than once in a minute.
The DescribeLimits
Request element has no content.
Returns the current provisioned-capacity quotas for your Amazon Web Services account in a Region, both for the Region as a whole and for any one DynamoDB table that you create there.
When you establish an Amazon Web Services account, the account has initial quotas on the maximum read capacity units and write capacity units that you can provision across all of your DynamoDB tables in a given Region. Also, there are per-table quotas that apply when you create a table there. For more information, see Service, Account, and Table Quotas page in the Amazon DynamoDB Developer Guide.
Although you can increase these quotas by filing a case at Amazon Web Services Support Center, obtaining the increase is not instantaneous. The DescribeLimits
action lets you write code to compare the capacity you are currently using to those quotas imposed by your account so that you have enough time to apply for an increase before you hit a quota.
For example, you could use one of the Amazon Web Services SDKs to do the following:
Call DescribeLimits
for a particular Region to obtain your current account quotas on provisioned capacity there.
Create a variable to hold the aggregate read capacity units provisioned for all your tables in that Region, and one to hold the aggregate write capacity units. Zero them both.
Call ListTables
to obtain a list of all your DynamoDB tables.
For each table name listed by ListTables
, do the following:
Call DescribeTable
with the table name.
Use the data returned by DescribeTable
to add the read capacity units and write capacity units provisioned for the table itself to your variables.
If the table has one or more global secondary indexes (GSIs), loop over these GSIs and add their provisioned capacity values to your variables as well.
Report the account quotas for that Region returned by DescribeLimits
, along with the total current provisioned capacity levels you have calculated.
This will let you see whether you are getting close to your account-level quotas.
The per-table quotas apply only when you are creating a new table. They restrict the sum of the provisioned capacity of the new table itself and all its global secondary indexes.
For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned capacity extremely rapidly, but the only quota that applies is that the aggregate provisioned capacity over all your tables and GSIs cannot exceed either of the per-account quotas.
DescribeLimits
should only be called periodically. You can expect throttling errors if you call it more than once in a minute.
The DescribeLimits
Request element has no content.
"DescribeTable": "Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table. If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable request again.",
"DescribeTableReplicaAutoScaling": "Describes auto scaling settings across replicas of the global table at once. This operation only applies to Version 2019.11.21 of global tables.",
"DescribeTimeToLive": "Gives a description of the Time to Live (TTL) status on the specified table.",
"DisableKinesisStreamingDestination": "Stops replication from the DynamoDB table to the Kinesis data stream. This is done without deleting either of the resources.",
"EnableKinesisStreamingDestination": "Starts table data replication to the specified Kinesis data stream at a timestamp chosen during the enable workflow. If this operation doesn't return results immediately, use DescribeKinesisStreamingDestination to check if streaming to the Kinesis data stream is ACTIVE.",
", - "ExecuteStatement": "This operation allows you to perform reads and singleton writes on data stored in DynamoDB, using PartiQL.
", - "ExecuteTransaction": "This operation allows you to perform transactional reads or writes on data stored in DynamoDB, using PartiQL.
", + "ExecuteStatement": "This operation allows you to perform reads and singleton writes on data stored in DynamoDB, using PartiQL.
", + "ExecuteTransaction": "This operation allows you to perform transactional reads or writes on data stored in DynamoDB, using PartiQL.
The entire transaction must consist of either read statements or write statements, you cannot mix both in one transaction. The EXISTS function is an exception and can be used to check the condition of specific attributes of the item in a similar manner to ConditionCheck
in the TransactWriteItems API.
Exports table data to an S3 bucket. The table must have point in time recovery enabled, and you can export data from any time within the point in time recovery window.
", "GetItem": "The GetItem
operation returns a set of attributes for the item with the given primary key. If there is no matching item, GetItem
does not return any data and there will be no Item
element in the response.
GetItem
provides an eventually consistent read by default. If your application requires a strongly consistent read, set ConsistentRead
to true
. Although a strongly consistent read might take more time than an eventually consistent read, it always returns the last updated value.
- "ListBackups": "List backups associated with an AWS account. To list backups for a given table, specify TableName. ListBackups returns a paginated list of results with at most 1 MB worth of items in a page. You can also specify a maximum number of entries to be returned in a page. In the request, start time is inclusive, but end time is exclusive. Note that these boundaries are for the time at which the original backup was requested. You can call ListBackups a maximum of five times per second.",
+ "ListBackups": "List backups associated with an Amazon Web Services account. To list backups for a given table, specify TableName. ListBackups returns a paginated list of results with at most 1 MB worth of items in a page. You can also specify a maximum number of entries to be returned in a page. In the request, start time is inclusive, but end time is exclusive. Note that these boundaries are for the time at which the original backup was requested. You can call ListBackups a maximum of five times per second.",
"ListContributorInsights": "Returns a list of ContributorInsightsSummary for a table and all its global secondary indexes.",
"ListExports": "Lists completed exports within the past 90 days.",
"ListGlobalTables": "Lists all global tables that have a replica in the specified Region. This operation only applies to Version 2017.11.29 of global tables.",
"ListTables": "Returns an array of table names associated with the current account and endpoint. The output from ListTables is paginated, with each page returning a maximum of 100 table names.",
"ListTagsOfResource": "List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource up to 10 times per second, per account. For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.",
", - "PutItem": "Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values. You can return the item's attribute values in the same operation, using the ReturnValues
parameter.
This topic provides general information about the PutItem
API.
For information on how to call the PutItem
API using the AWS SDK in specific languages, see the following:
When you add an item, the primary key attributes are the only required attributes. Attribute values cannot be null.
Empty String and Binary attribute values are allowed. Attribute values of type String and Binary must have a length greater than zero if the attribute is used as a key attribute for a table or index. Set type attributes cannot be empty.
Invalid Requests with empty values will be rejected with a ValidationException
exception.
To prevent a new item from replacing an existing item, use a conditional expression that contains the attribute_not_exists
function with the name of the attribute being used as the partition key for the table. Since every record must contain that attribute, the attribute_not_exists
function will only succeed if no matching item exists.
For more information about PutItem
, see Working with Items in the Amazon DynamoDB Developer Guide.
The Query
operation finds items based on primary key values. You can query any table or secondary index that has a composite primary key (a partition key and a sort key).
Use the KeyConditionExpression
parameter to provide a specific value for the partition key. The Query
operation will return all of the items from the table or index with that partition key value. You can optionally narrow the scope of the Query
operation by specifying a sort key value and a comparison operator in KeyConditionExpression
. To further refine the Query
results, you can optionally provide a FilterExpression
. A FilterExpression
determines which items within the results should be returned to you. All of the other results are discarded.
A Query
operation always returns a result set. If no matching items are found, the result set will be empty. Queries that do not return results consume the minimum number of read capacity units for that type of read operation.
DynamoDB calculates the number of read capacity units consumed based on item size, not on the amount of data that is returned to an application. The number of capacity units consumed will be the same whether you request all of the attributes (the default behavior) or just some of them (using a projection expression). The number will also be the same whether or not you use a FilterExpression
.
Query
results are always sorted by the sort key value. If the data type of the sort key is Number, the results are returned in numeric order; otherwise, the results are returned in order of UTF-8 bytes. By default, the sort order is ascending. To reverse the order, set the ScanIndexForward
parameter to false.
A single Query
operation will read up to the maximum number of items set (if using the Limit
parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression
. If LastEvaluatedKey
is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide.
FilterExpression
is applied after a Query
finishes, but before the results are returned. A FilterExpression
cannot contain partition key or sort key attributes. You need to specify those attributes in the KeyConditionExpression
.
A Query
operation can return an empty result set and a LastEvaluatedKey
if all the items read for the page of results are filtered out.
You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set the ConsistentRead
parameter to true
and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead
when querying a global secondary index.
+ "PutItem": "Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values. You can return the item's attribute values in the same operation, using the ReturnValues parameter. This topic provides general information about the PutItem API. For information on how to call the PutItem API using the Amazon Web Services SDK in specific languages, see the following: When you add an item, the primary key attributes are the only required attributes. Attribute values cannot be null. Empty String and Binary attribute values are allowed. Attribute values of type String and Binary must have a length greater than zero if the attribute is used as a key attribute for a table or index. Set type attributes cannot be empty. Invalid Requests with empty values will be rejected with a ValidationException exception. To prevent a new item from replacing an existing item, use a conditional expression that contains the attribute_not_exists function with the name of the attribute being used as the partition key for the table. Since every record must contain that attribute, the attribute_not_exists function will only succeed if no matching item exists. For more information about PutItem, see Working with Items in the Amazon DynamoDB Developer Guide.",
+ "Query": "You must provide the name of the partition key attribute and a single value for that attribute. Query returns all items with that partition key value. Optionally, you can provide a sort key attribute and use a comparison operator to refine the search results. Use the KeyConditionExpression parameter to provide a specific value for the partition key. The Query operation will return all of the items from the table or index with that partition key value. You can optionally narrow the scope of the Query operation by specifying a sort key value and a comparison operator in KeyConditionExpression. To further refine the Query results, you can optionally provide a FilterExpression. A FilterExpression determines which items within the results should be returned to you. All of the other results are discarded. A Query operation always returns a result set. If no matching items are found, the result set will be empty. Queries that do not return results consume the minimum number of read capacity units for that type of read operation. DynamoDB calculates the number of read capacity units consumed based on item size, not on the amount of data that is returned to an application. The number of capacity units consumed will be the same whether you request all of the attributes (the default behavior) or just some of them (using a projection expression). The number will also be the same whether or not you use a FilterExpression. Query results are always sorted by the sort key value. If the data type of the sort key is Number, the results are returned in numeric order; otherwise, the results are returned in order of UTF-8 bytes. By default, the sort order is ascending. To reverse the order, set the ScanIndexForward parameter to false. A single Query operation will read up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you will need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide. FilterExpression is applied after a Query finishes, but before the results are returned. A FilterExpression cannot contain partition key or sort key attributes. You need to specify those attributes in the KeyConditionExpression. A Query operation can return an empty result set and a LastEvaluatedKey if all the items read for the page of results are filtered out. You can query a table, a local secondary index, or a global secondary index. For a query on a table or on a local secondary index, you can set the ConsistentRead parameter to true and obtain a strongly consistent result. Global secondary indexes support eventually consistent reads only, so do not specify ConsistentRead when querying a global secondary index.",
"RestoreTableFromBackup": "Creates a new table from an existing backup. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account. You can call RestoreTableFromBackup at a maximum rate of 10 times per second. You must manually set up the following on the restored table: Auto scaling policies, IAM policies, Amazon CloudWatch metrics and alarms, Tags, Stream settings, Time to Live (TTL) settings.",
"RestoreTableToPointInTime": "Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account. When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table. Along with data, the following are also included on the new restored table using point in time recovery: Global secondary indexes (GSIs), Local secondary indexes (LSIs), Provisioned read and write capacity, Encryption settings. All these settings come from the current settings of the source table at the time of restore. You must manually set up the following on the restored table: Auto scaling policies, IAM policies, Amazon CloudWatch metrics and alarms, Tags, Stream settings, Time to Live (TTL) settings, Point in time recovery settings.",
"Scan": "The Scan operation returns one or more items and item attributes by accessing every item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression operation. If the total number of scanned items exceeds the maximum dataset size limit of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey value to continue the scan in a subsequent operation. The results also include the number of items exceeding the limit. A scan can result in no table data meeting the filter criteria. A single Scan operation reads up to the maximum number of items set (if using the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to the results using FilterExpression. If LastEvaluatedKey is present in the response, you need to paginate the result set. For more information, see Paginating the Results in the Amazon DynamoDB Developer Guide. Scan operations proceed sequentially; however, for faster performance on a large table or secondary index, applications can request a parallel Scan operation by providing the Segment and TotalSegments parameters. For more information, see Parallel Scan in the Amazon DynamoDB Developer Guide. Scan uses eventually consistent reads when accessing the data in a table; therefore, the result set might not include the changes to data in the table immediately before the operation began. If you need a consistent copy of the data, as of the time that the Scan begins, you can set the ConsistentRead parameter to true.",
"TagResource": "Associate a set of tags with an Amazon DynamoDB resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking. You can call TagResource up to five times per second, per account. For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.",
", - "TransactGetItems": " TransactGetItems
is a synchronous operation that atomically retrieves multiple items from one or more tables (but not from indexes) in a single account and Region. A TransactGetItems
call can contain up to 25 TransactGetItem
objects, each of which contains a Get
structure that specifies an item to retrieve from a table in the account and Region. A call to TransactGetItems
cannot retrieve items from tables in more than one AWS account or Region. The aggregate size of the items in the transaction cannot exceed 4 MB.
DynamoDB rejects the entire TransactGetItems
request if any of the following is true:
A conflicting operation is in the process of updating an item to be read.
There is insufficient provisioned capacity for the transaction to be completed.
There is a user error, such as an invalid data format.
The aggregate size of the items in the transaction cannot exceed 4 MB.
TransactWriteItems
is a synchronous write operation that groups up to 25 action requests. These actions can target items in different tables, but not in different AWS accounts or Regions, and no two actions can target the same item. For example, you cannot both ConditionCheck
and Update
the same item. The aggregate size of the items in the transaction cannot exceed 4 MB.
The actions are completed atomically so that either all of them succeed, or all of them fail. They are defined by the following objects:
Put
Initiates a PutItem
operation to write a new item. This structure specifies the primary key of the item to be written, the name of the table to write it in, an optional condition expression that must be satisfied for the write to succeed, a list of the item's attributes, and a field indicating whether to retrieve the item's attributes if the condition is not met.
Update
Initiates an UpdateItem
operation to update an existing item. This structure specifies the primary key of the item to be updated, the name of the table where it resides, an optional condition expression that must be satisfied for the update to succeed, an expression that defines one or more attributes to be updated, and a field indicating whether to retrieve the item's attributes if the condition is not met.
Delete
Initiates a DeleteItem
operation to delete an existing item. This structure specifies the primary key of the item to be deleted, the name of the table where it resides, an optional condition expression that must be satisfied for the deletion to succeed, and a field indicating whether to retrieve the item's attributes if the condition is not met.
ConditionCheck
Applies a condition to an item that is not being modified by the transaction. This structure specifies the primary key of the item to be checked, the name of the table where it resides, a condition expression that must be satisfied for the transaction to succeed, and a field indicating whether to retrieve the item's attributes if the condition is not met.
DynamoDB rejects the entire TransactWriteItems
request if any of the following is true:
A condition in one of the condition expressions is not met.
An ongoing operation is in the process of updating the same item.
There is insufficient provisioned capacity for the transaction to be completed.
An item size becomes too large (bigger than 400 KB), a local secondary index (LSI) becomes too large, or a similar validation error occurs because of changes made by the transaction.
The aggregate size of the items in the transaction exceeds 4 MB.
There is a user error, such as an invalid data format.
+ "TransactGetItems": "TransactGetItems is a synchronous operation that atomically retrieves multiple items from one or more tables (but not from indexes) in a single account and Region. A TransactGetItems call can contain up to 25 TransactGetItem objects, each of which contains a Get structure that specifies an item to retrieve from a table in the account and Region. A call to TransactGetItems cannot retrieve items from tables in more than one Amazon Web Services account or Region. The aggregate size of the items in the transaction cannot exceed 4 MB. DynamoDB rejects the entire TransactGetItems request if any of the following is true: A conflicting operation is in the process of updating an item to be read. There is insufficient provisioned capacity for the transaction to be completed. There is a user error, such as an invalid data format. The aggregate size of the items in the transaction cannot exceed 4 MB.",
+ "TransactWriteItems": "TransactWriteItems is a synchronous write operation that groups up to 25 action requests. These actions can target items in different tables, but not in different Amazon Web Services accounts or Regions, and no two actions can target the same item. For example, you cannot both ConditionCheck and Update the same item. The aggregate size of the items in the transaction cannot exceed 4 MB. The actions are completed atomically so that either all of them succeed, or all of them fail. They are defined by the following objects: Put — Initiates a PutItem operation to write a new item. This structure specifies the primary key of the item to be written, the name of the table to write it in, an optional condition expression that must be satisfied for the write to succeed, a list of the item's attributes, and a field indicating whether to retrieve the item's attributes if the condition is not met. Update — Initiates an UpdateItem operation to update an existing item. This structure specifies the primary key of the item to be updated, the name of the table where it resides, an optional condition expression that must be satisfied for the update to succeed, an expression that defines one or more attributes to be updated, and a field indicating whether to retrieve the item's attributes if the condition is not met. Delete — Initiates a DeleteItem operation to delete an existing item. This structure specifies the primary key of the item to be deleted, the name of the table where it resides, an optional condition expression that must be satisfied for the deletion to succeed, and a field indicating whether to retrieve the item's attributes if the condition is not met. ConditionCheck — Applies a condition to an item that is not being modified by the transaction. This structure specifies the primary key of the item to be checked, the name of the table where it resides, a condition expression that must be satisfied for the transaction to succeed, and a field indicating whether to retrieve the item's attributes if the condition is not met. DynamoDB rejects the entire TransactWriteItems request if any of the following is true: A condition in one of the condition expressions is not met. An ongoing operation is in the process of updating the same item. There is insufficient provisioned capacity for the transaction to be completed. An item size becomes too large (bigger than 400 KB), a local secondary index (LSI) becomes too large, or a similar validation error occurs because of changes made by the transaction. The aggregate size of the items in the transaction exceeds 4 MB. There is a user error, such as an invalid data format.",
"UntagResource": "Removes the association of tags from an Amazon DynamoDB resource. You can call UntagResource up to five times per second, per account. For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.",
"UpdateContinuousBackups": "UpdateContinuousBackups enables or disables point in time recovery for the specified table. A successful UpdateContinuousBackups call returns the current ContinuousBackupsDescription. Continuous backups are ENABLED on all tables at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED. Once continuous backups and point in time recovery are enabled, you can restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. LatestRestorableDateTime is typically 5 minutes before the current time. You can restore your table to any point in time during the last 35 days.",
- "UpdateContributorInsights": "Updates the status for contributor insights for a specific table or index.",
+ "UpdateContributorInsights": "Updates the status for contributor insights for a specific table or index. CloudWatch Contributor Insights for DynamoDB graphs display the partition key and (if applicable) sort key of frequently accessed items and frequently throttled items in plaintext. If you require the use of AWS Key Management Service (KMS) to encrypt this table’s partition key and sort key data with an AWS managed key or customer managed key, you should not enable CloudWatch Contributor Insights for DynamoDB for this table.",
", "UpdateGlobalTable": "Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, have the same name as the global table, have the same key schema, have DynamoDB Streams enabled, and have the same provisioned and maximum write capacity units.
Although you can use UpdateGlobalTable
to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas.
If global secondary indexes are specified, then the following conditions must also be met:
The global secondary indexes must have the same name.
The global secondary indexes must have the same hash key and sort key (if present).
The global secondary indexes must have the same provisioned and maximum write capacity units.
Updates settings for a global table.
", "UpdateItem": "Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values).
You can also return the item's attribute values in the same UpdateItem
operation using the ReturnValues
parameter.
@@ ... @@
- "The reason DynamoDB archived the table. Currently, the only possible value is: INACCESSIBLE_ENCRYPTION_CREDENTIALS - The table was archived due to the table's AWS KMS key being inaccessible for more than seven days. An On-Demand backup was created at the archival time.",
+ "The reason DynamoDB archived the table. Currently, the only possible value is: INACCESSIBLE_ENCRYPTION_CREDENTIALS - The table was archived due to the table's KMS key being inaccessible for more than seven days. An On-Demand backup was created at the archival time.",
@@ ... @@
- "BackupType: USER - You create and manage these using the on-demand backup feature. SYSTEM - If you delete a table with point-in-time recovery enabled, a SYSTEM backup is automatically created and is retained for 35 days (at no additional cost). System backups allow you to restore the deleted table to the state it was in just before the point of deletion. AWS_BACKUP - On-demand backup created by you from AWS Backup service.",
- "BackupType: USER - You create and manage these using the on-demand backup feature. SYSTEM - If you delete a table with point-in-time recovery enabled, a SYSTEM backup is automatically created and is retained for 35 days (at no additional cost). System backups allow you to restore the deleted table to the state it was in just before the point of deletion. AWS_BACKUP - On-demand backup created by you from AWS Backup service.",
+ "BackupType: USER - You create and manage these using the on-demand backup feature. SYSTEM - If you delete a table with point-in-time recovery enabled, a SYSTEM backup is automatically created and is retained for 35 days (at no additional cost). System backups allow you to restore the deleted table to the state it was in just before the point of deletion. AWS_BACKUP - On-demand backup created by you from Backup service.",
+ "BackupType: USER - You create and manage these using the on-demand backup feature. SYSTEM - If you delete a table with point-in-time recovery enabled, a SYSTEM backup is automatically created and is retained for 35 days (at no additional cost). System backups allow you to restore the deleted table to the state it was in just before the point of deletion. AWS_BACKUP - On-demand backup created by you from Backup service.",
@@ ... @@
"Set this value to get remaining results, if NextToken was returned in the statement response.",
"Set this value to get remaining results, if NextToken was returned in the statement response.",
"Providing a ClientRequestToken makes the call to TransactWriteItems idempotent, meaning that multiple identical calls have the same effect as one single call. Although multiple identical calls using the same client request token produce the same result on the server (no side effects), the responses to the calls might not be the same. If the ReturnConsumedCapacity parameter is set, then the initial TransactWriteItems call returns the amount of write capacity units consumed in making the changes. Subsequent TransactWriteItems calls with the same client token return the number of read capacity units consumed in reading the item. A client request token is valid for 10 minutes after the first request that uses it is completed. After 10 minutes, any request with the same client token is treated as a new request. Do not resubmit the same request with the same client token for more than 10 minutes, or the result might not be idempotent. If you submit a request with the same client token but a change in other parameters within the 10-minute idempotency window, DynamoDB returns an IdempotentParameterMismatch exception.",
@@ ... @@
"The read consistency of the PartiQL batch request.",
- "ExecuteStatementInput$ConsistentRead": "The consistency of a read operation. If set to true, then a strongly consistent read is used; otherwise, an eventually consistent read is used.",
+ "ExecuteStatementInput$ConsistentRead": "The consistency of a read operation. If set to true, then a strongly consistent read is used; otherwise, an eventually consistent read is used.",
"Determines the read consistency model: If set to true, then the operation uses strongly consistent reads; otherwise, the operation uses eventually consistent reads.",
"The consistency of a read operation. If set to true, then a strongly consistent read is used; otherwise, an eventually consistent read is used.",
"Determines the read consistency model: If set to true, then the operation uses strongly consistent reads; otherwise, the operation uses eventually consistent reads. Strongly consistent reads are not supported on global secondary indexes. If you query a global secondary index with ConsistentRead set to true, you will receive a ValidationException.",
- "DescribeContributorInsightsOutput$ContributorInsightsRuleList": "List of names of the associated Alpine rules."
+ "DescribeContributorInsightsOutput$ContributorInsightsRuleList": "List of names of the associated contributor insights rules."
}
},
"ContributorInsightsStatus": {
"base": null,
"refs": {
"ContributorInsightsSummary$ContributorInsightsStatus": "Describes the current status for contributor insights for the given table and index, if applicable.",
- "DescribeContributorInsightsOutput$ContributorInsightsStatus": "Current Status contributor insights.",
+ "DescribeContributorInsightsOutput$ContributorInsightsStatus": "Current status of contributor insights.",
"UpdateContributorInsightsOutput$ContributorInsightsStatus": "The status of contributor insights"
}
},
@@ -713,7 +713,7 @@
"ReplicaDescription$ReplicaInaccessibleDateTime": "The time at which the replica was first detected as inaccessible. To determine cause of inaccessibility check the ReplicaStatus property.",
"Point in time or source backup time.",
"RestoreTableToPointInTimeInput$RestoreDateTime": "Time in the past to restore the table to.",
- "SSEDescription$InaccessibleEncryptionDateTime": "Indicates the time, in UNIX epoch date format, when DynamoDB detected that the table's AWS KMS key was inaccessible. This attribute will automatically be cleared when DynamoDB detects that the table's AWS KMS key is accessible again. DynamoDB will initiate the table archival process when table's AWS KMS key remains inaccessible for more than seven days from this date.",
+ "SSEDescription$InaccessibleEncryptionDateTime": "Indicates the time, in UNIX epoch date format, when DynamoDB detected that the table's KMS key was inaccessible. This attribute will automatically be cleared when DynamoDB detects that the table's KMS key is accessible again. DynamoDB will initiate the table archival process when table's KMS key remains inaccessible for more than seven days from this date.",
"TableDescription$CreationDateTime": "The date and time when the table was created, in UNIX epoch time format."
}
},
@@ -912,7 +912,7 @@
}
},
"DuplicateItemException": {
- "base": "There was an attempt to insert an item with the same primary key as an item that already exists in the DynamoDB table.",
+ "base": "There was an attempt to insert an item with the same primary key as an item that already exists in the DynamoDB table.",
"refs": {
}
},
@@ -1154,7 +1154,7 @@
"FailureException": {
"base": "Represents a failure a contributor insights operation.",
", "refs": { - "DescribeContributorInsightsOutput$FailureException": "Returns information about the last failure that encountered.
The most common exceptions for a FAILED status are:
LimitExceededException - Per-account Amazon CloudWatch Contributor Insights rule limit reached. Please disable Contributor Insights for other tables/indexes OR disable Contributor Insights rules before retrying.
AccessDeniedException - Amazon CloudWatch Contributor Insights rules cannot be modified due to insufficient permissions.
AccessDeniedException - Failed to create service-linked role for Contributor Insights due to insufficient permissions.
InternalServerError - Failed to create Amazon CloudWatch Contributor Insights rules. Please retry request.
Returns information about the last failure that was encountered.
The most common exceptions for a FAILED status are:
LimitExceededException - Per-account Amazon CloudWatch Contributor Insights rule limit reached. Please disable Contributor Insights for other tables/indexes OR disable Contributor Insights rules before retrying.
AccessDeniedException - Amazon CloudWatch Contributor Insights rules cannot be modified due to insufficient permissions.
AccessDeniedException - Failed to create service-linked role for Contributor Insights due to insufficient permissions.
InternalServerError - Failed to create Amazon CloudWatch Contributor Insights rules. Please retry request.
If a read operation was used, this property will contain the result of the reade operation; a map of attribute names and their values. For the write operations this value will be empty.
", + "ExecuteStatementOutput$Items": "If a read operation was used, this property will contain the result of the read operation; a map of attribute names and their values. For the write operations this value will be empty.
", "QueryOutput$Items": "An array of item attributes that match the query criteria. Each element in this array consists of an attribute name and the value for that attribute.
", "ScanOutput$Items": "An array of item attributes that match the scan criteria. Each element in this array consists of an attribute name and the value for that attribute.
" } @@ -1453,23 +1453,23 @@ "ItemResponseList": { "base": null, "refs": { - "ExecuteTransactionOutput$Responses": "The response to a PartiQL transaction.
", + "ExecuteTransactionOutput$Responses": "The response to a PartiQL transaction.
", "TransactGetItemsOutput$Responses": "An ordered array of up to 25 ItemResponse
objects, each of which corresponds to the TransactGetItem
object in the same position in the TransactItems array. Each ItemResponse
object contains a Map of the name-value pairs that are the projected attributes of the requested item.
If a requested item could not be retrieved, the corresponding ItemResponse
object is Null, or if the requested item has no projected attributes, the corresponding ItemResponse
object is an empty Map.
The AWS KMS customer master key (CMK) ARN used for the AWS KMS encryption.
" + "SSEDescription$KMSMasterKeyArn": "The KMS key ARN used for the KMS encryption.
" } }, "KMSMasterKeyId": { "base": null, "refs": { - "CreateReplicationGroupMemberAction$KMSMasterKeyId": "The AWS KMS customer master key (CMK) that should be used for AWS KMS encryption in the new replica. To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB KMS master key alias/aws/dynamodb.
", - "ReplicaDescription$KMSMasterKeyId": "The AWS KMS customer master key (CMK) of the replica that will be used for AWS KMS encryption.
", - "SSESpecification$KMSMasterKeyId": "The AWS KMS customer master key (CMK) that should be used for the AWS KMS encryption. To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB customer master key alias/aws/dynamodb.
", - "UpdateReplicationGroupMemberAction$KMSMasterKeyId": "The AWS KMS customer master key (CMK) of the replica that should be used for AWS KMS encryption. To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB KMS master key alias/aws/dynamodb.
" + "CreateReplicationGroupMemberAction$KMSMasterKeyId": "The KMS key that should be used for KMS encryption in the new replica. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB KMS key alias/aws/dynamodb
.
The KMS key of the replica that will be used for KMS encryption.
", + "SSESpecification$KMSMasterKeyId": "The KMS key that should be used for the KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB key alias/aws/dynamodb
.
The KMS key of the replica that should be used for KMS encryption. To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter if the key is different from the default DynamoDB KMS key alias/aws/dynamodb
.
The list of PartiQL statements representing the transaction to run.
" + "ExecuteTransactionInput$TransactStatements": "The list of PartiQL statements representing the transaction to run.
" } }, "PartiQLBatchRequest": { "base": null, "refs": { - "BatchExecuteStatementInput$Statements": "The list of PartiQL statements representing the batch to run.
" + "BatchExecuteStatementInput$Statements": "The list of PartiQL statements representing the batch to run.
" } }, "PartiQLBatchResponse": { "base": null, "refs": { - "BatchExecuteStatementOutput$Responses": "The response to each PartiQL statement in the batch.
" + "BatchExecuteStatementOutput$Responses": "The response to each PartiQL statement in the batch.
" } }, "PartiQLNextToken": { "base": null, "refs": { - "ExecuteStatementInput$NextToken": " Set this value to get remaining results, if NextToken
was returned in the statement response.
If the response of a read request exceeds the response payload limit DynamoDB will set this value in the response. If set, you can use that this value in the subsequent request to get the remaining results.
" + "ExecuteStatementInput$NextToken": "Set this value to get remaining results, if NextToken
was returned in the statement response.
If the response of a read request exceeds the response payload limit, DynamoDB will set this value in the response. If set, you can use this value in the subsequent request to get the remaining results.
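A sketch of the pagination loop this implies, assuming the same imports; the statement is invented:

    // scanAll drains a PartiQL read by following NextToken until the service
    // stops returning one. svc is an initialized *dynamodb.DynamoDB client.
    func scanAll(svc *dynamodb.DynamoDB) ([]map[string]*dynamodb.AttributeValue, error) {
        var items []map[string]*dynamodb.AttributeValue
        var next *string
        for {
            out, err := svc.ExecuteStatement(&dynamodb.ExecuteStatementInput{
                Statement: aws.String(`SELECT * FROM "Music"`),
                NextToken: next, // nil on the first request
            })
            if err != nil {
                return nil, err
            }
            items = append(items, out.Items...)
            if out.NextToken == nil {
                return items, nil // no more pages
            }
            next = out.NextToken
        }
    }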
" } }, "PartiQLStatement": { "base": null, "refs": { "BatchStatementRequest$Statement": "A valid PartiQL statement.
", - "ExecuteStatementInput$Statement": "The PartiQL statement representing the operation to run.
", + "ExecuteStatementInput$Statement": "The PartiQL statement representing the operation to run.
", "ParameterizedStatement$Statement": "A PartiQL statment that uses parameters.
" } }, @@ -1868,7 +1868,7 @@ "base": null, "refs": { "BatchStatementRequest$Parameters": "The parameters associated with a PartiQL statement in the batch request.
", - "ExecuteStatementInput$Parameters": "The parameters for the PartiQL statement, if any.
", + "ExecuteStatementInput$Parameters": "The parameters for the PartiQL statement, if any.
", "ParameterizedStatement$Parameters": "The parameter values.
" } }, @@ -1922,7 +1922,7 @@ } }, "ProvisionedThroughputExceededException": { - "base": "Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that receive this exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce the frequency of requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.
", + "base": "Your request rate is too high. The Amazon Web Services SDKs for DynamoDB automatically retry requests that receive this exception. Your request is eventually successful, unless your retry queue is too large to finish. Reduce the frequency of requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.
", "refs": { } }, @@ -2155,7 +2155,7 @@ "base": null, "refs": { "ReplicaAutoScalingDescription$ReplicaStatus": "The current state of the replica:
CREATING
- The replica is being created.
UPDATING
- The replica is being updated.
DELETING
- The replica is being deleted.
ACTIVE
- The replica is ready for use.
The current state of the replica:
CREATING
- The replica is being created.
UPDATING
- The replica is being updated.
DELETING
- The replica is being deleted.
ACTIVE
- The replica is ready for use.
REGION_DISABLED
- The replica is inaccessible because the AWS Region has been disabled.
If the AWS Region remains inaccessible for more than 20 hours, DynamoDB will remove this replica from the replication group. The replica will not be deleted and replication will stop from and to this region.
INACCESSIBLE_ENCRYPTION_CREDENTIALS
- The AWS KMS key used to encrypt the table is inaccessible.
If the AWS KMS key remains inaccessible for more than 20 hours, DynamoDB will remove this replica from the replication group. The replica will not be deleted and replication will stop from and to this region.
The current state of the replica:
CREATING
- The replica is being created.
UPDATING
- The replica is being updated.
DELETING
- The replica is being deleted.
ACTIVE
- The replica is ready for use.
REGION_DISABLED
- The replica is inaccessible because the Amazon Web Services Region has been disabled.
If the Amazon Web Services Region remains inaccessible for more than 20 hours, DynamoDB will remove this replica from the replication group. The replica will not be deleted and replication will stop from and to this region.
INACCESSIBLE_ENCRYPTION_CREDENTIALS
- The KMS key used to encrypt the table is inaccessible.
If the KMS key remains inaccessible for more than 20 hours, DynamoDB will remove this replica from the replication group. The replica will not be deleted and replication will stop from and to this region.
The current state of the Region:
CREATING
- The Region is being created.
UPDATING
- The Region is being updated.
DELETING
- The Region is being deleted.
ACTIVE
- The Region is ready for use.
Throughput exceeds the current throughput quota for your account. Please contact AWS Support at AWS Support to request a quota increase.
", + "base": "Throughput exceeds the current throughput quota for your account. Please contact Amazon Web Services Support to request a quota increase.
", "refs": { } }, @@ -2279,7 +2279,7 @@ "base": null, "refs": { "DeleteItemInput$ReturnValues": "Use ReturnValues
if you want to get the item attributes as they appeared before they were deleted. For DeleteItem
, the valid values are:
NONE
- If ReturnValues
is not specified, or if its value is NONE
, then nothing is returned. (This setting is the default for ReturnValues
.)
ALL_OLD
- The content of the old item is returned.
The ReturnValues
parameter is used by several DynamoDB operations; however, DeleteItem
does not recognize any values other than NONE
or ALL_OLD
.
Use ReturnValues
if you want to get the item attributes as they appeared before they were updated with the PutItem
request. For PutItem
, the valid values are:
NONE
- If ReturnValues
is not specified, or if its value is NONE
, then nothing is returned. (This setting is the default for ReturnValues
.)
ALL_OLD
- If PutItem
overwrote an attribute name-value pair, then the content of the old item is returned.
The ReturnValues
parameter is used by several DynamoDB operations; however, PutItem
does not recognize any values other than NONE
or ALL_OLD
.
Use ReturnValues
if you want to get the item attributes as they appeared before they were updated with the PutItem
request. For PutItem
, the valid values are:
NONE
- If ReturnValues
is not specified, or if its value is NONE
, then nothing is returned. (This setting is the default for ReturnValues
.)
ALL_OLD
- If PutItem
overwrote an attribute name-value pair, then the content of the old item is returned.
The values returned are strongly consistent.
The ReturnValues
parameter is used by several DynamoDB operations; however, PutItem
does not recognize any values other than NONE
or ALL_OLD
.
Use ReturnValues
if you want to get the item attributes as they appear before or after they are updated. For UpdateItem
, the valid values are:
NONE
- If ReturnValues
is not specified, or if its value is NONE
, then nothing is returned. (This setting is the default for ReturnValues
.)
ALL_OLD
- Returns all of the attributes of the item, as they appeared before the UpdateItem operation.
UPDATED_OLD
- Returns only the updated attributes, as they appeared before the UpdateItem operation.
ALL_NEW
- Returns all of the attributes of the item, as they appear after the UpdateItem operation.
UPDATED_NEW
- Returns only the updated attributes, as they appear after the UpdateItem operation.
There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No read capacity units are consumed.
The values returned are strongly consistent.
" } }, @@ -2302,8 +2302,8 @@ "S3BucketOwner": { "base": null, "refs": { - "ExportDescription$S3BucketOwner": "The ID of the AWS account that owns the bucket containing the export.
", - "ExportTableToPointInTimeInput$S3BucketOwner": "The ID of the AWS account that owns the bucket the export will be stored in.
" + "ExportDescription$S3BucketOwner": "The ID of the Amazon Web Services account that owns the bucket containing the export.
", + "ExportTableToPointInTimeInput$S3BucketOwner": "The ID of the Amazon Web Services account that owns the bucket the export will be stored in.
" } }, "S3Prefix": { @@ -2316,15 +2316,15 @@ "S3SseAlgorithm": { "base": null, "refs": { - "ExportDescription$S3SseAlgorithm": "Type of encryption used on the bucket where export data is stored. Valid values for S3SseAlgorithm
are:
AES256
- server-side encryption with Amazon S3 managed keys
KMS
- server-side encryption with AWS KMS managed keys
Type of encryption used on the bucket where export data will be stored. Valid values for S3SseAlgorithm
are:
AES256
- server-side encryption with Amazon S3 managed keys
KMS
- server-side encryption with AWS KMS managed keys
Type of encryption used on the bucket where export data is stored. Valid values for S3SseAlgorithm
are:
AES256
- server-side encryption with Amazon S3 managed keys
KMS
- server-side encryption with KMS managed keys
Type of encryption used on the bucket where export data will be stored. Valid values for S3SseAlgorithm
are:
AES256
- server-side encryption with Amazon S3 managed keys
KMS
- server-side encryption with KMS managed keys
The ID of the AWS KMS managed key used to encrypt the S3 bucket where export data is stored (if applicable).
", - "ExportTableToPointInTimeInput$S3SseKmsKeyId": "The ID of the AWS KMS managed key used to encrypt the S3 bucket where export data will be stored (if applicable).
" + "ExportDescription$S3SseKmsKeyId": "The ID of the KMS managed key used to encrypt the S3 bucket where export data is stored (if applicable).
", + "ExportTableToPointInTimeInput$S3SseKmsKeyId": "The ID of the KMS managed key used to encrypt the S3 bucket where export data will be stored (if applicable).
" } }, "SSEDescription": { @@ -2337,7 +2337,7 @@ "SSEEnabled": { "base": null, "refs": { - "SSESpecification$Enabled": "Indicates whether server-side encryption is done using an AWS managed CMK or an AWS owned CMK. If enabled (true), server-side encryption type is set to KMS
and an AWS managed CMK is used (AWS KMS charges apply). If disabled (false) or not specified, server-side encryption is set to AWS owned CMK.
Indicates whether server-side encryption is done using an Amazon Web Services managed key or an Amazon Web Services owned key. If enabled (true), server-side encryption type is set to KMS
and an Amazon Web Services managed key is used (KMS charges apply). If disabled (false) or not specified, server-side encryption is set to Amazon Web Services owned key.
Server-side encryption type. The only supported value is:
KMS
- Server-side encryption that uses AWS Key Management Service. The key is stored in your account and is managed by AWS KMS (AWS KMS charges apply).
Server-side encryption type. The only supported value is:
KMS
- Server-side encryption that uses AWS Key Management Service. The key is stored in your account and is managed by AWS KMS (AWS KMS charges apply).
Server-side encryption type. The only supported value is:
KMS
- Server-side encryption that uses Key Management Service. The key is stored in your account and is managed by KMS (KMS charges apply).
Server-side encryption type. The only supported value is:
KMS
- Server-side encryption that uses Key Management Service. The key is stored in your account and is managed by KMS (KMS charges apply).
The human-readable string that corresponds to the replica status.
", "LocalSecondaryIndexDescription$IndexArn": "The Amazon Resource Name (ARN) that uniquely identifies the index.
", "TableDescription$TableArn": "The Amazon Resource Name (ARN) that uniquely identifies the table.
", - "TableDescription$LatestStreamLabel": "A timestamp, in ISO 8601 format, for this stream.
Note that LatestStreamLabel
is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:
AWS customer ID
Table name
StreamLabel
Represents the version of global tables in use, if the table is replicated across AWS Regions.
" + "TableDescription$LatestStreamLabel": "A timestamp, in ISO 8601 format, for this stream.
Note that LatestStreamLabel
is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:
Amazon Web Services customer ID
Table name
StreamLabel
Represents the version of global tables in use, if the table is replicated across Amazon Web Services Regions.
" } }, "StringAttributeValue": { @@ -2608,11 +2608,11 @@ "base": null, "refs": { "TableAutoScalingDescription$TableStatus": "The current state of the table:
CREATING
- The table is being created.
UPDATING
- The table is being updated.
DELETING
- The table is being deleted.
ACTIVE
- The table is ready for use.
The current state of the table:
CREATING
- The table is being created.
UPDATING
- The table is being updated.
DELETING
- The table is being deleted.
ACTIVE
- The table is ready for use.
INACCESSIBLE_ENCRYPTION_CREDENTIALS
- The AWS KMS key used to encrypt the table in inaccessible. Table operations may fail due to failure to use the AWS KMS key. DynamoDB will initiate the table archival process when a table's AWS KMS key remains inaccessible for more than seven days.
ARCHIVING
- The table is being archived. Operations are not allowed until archival is complete.
ARCHIVED
- The table has been archived. See the ArchivalReason for more information.
The current state of the table:
CREATING
- The table is being created.
UPDATING
- The table is being updated.
DELETING
- The table is being deleted.
ACTIVE
- The table is ready for use.
INACCESSIBLE_ENCRYPTION_CREDENTIALS
- The KMS key used to encrypt the table is inaccessible. Table operations may fail due to failure to use the KMS key. DynamoDB will initiate the table archival process when a table's KMS key remains inaccessible for more than seven days.
ARCHIVING
- The table is being archived. Operations are not allowed until archival is complete.
ARCHIVED
- The table has been archived. See the ArchivalReason for more information.
Describes a tag. A tag is a key-value pair. You can add up to 50 tags to a single DynamoDB table.
AWS-assigned tag names and values are automatically assigned the aws:
prefix, which the user cannot assign. AWS-assigned tag names do not count towards the tag limit of 50. User-assigned tag names have the prefix user:
in the Cost Allocation Report. You cannot backdate the application of a tag.
For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.
", + "base": "Describes a tag. A tag is a key-value pair. You can add up to 50 tags to a single DynamoDB table.
Amazon Web Services-assigned tag names and values are automatically assigned the aws:
prefix, which the user cannot assign. Amazon Web Services-assigned tag names do not count towards the tag limit of 50. User-assigned tag names have the prefix user:
in the Cost Allocation Report. You cannot backdate the application of a tag.
For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the Amazon DynamoDB Developer Guide.
", "refs": { "TagList$member": null } @@ -2626,7 +2626,7 @@ "TagKeyString": { "base": null, "refs": { - "Tag$Key": "The key of the tag. Tag keys are case sensitive. Each DynamoDB table can only have up to one tag with the same key. If you try to add an existing tag (same key), the existing tag value will be updated to the new value.
", + "Tag$Key": "The key of the tag. Tag keys are case sensitive. Each DynamoDB table can only have up to one tag with the same key. If you try to add an existing tag (same key), the existing tag value will be updated to the new value.
", "TagKeyList$member": null } }, @@ -2725,7 +2725,7 @@ "TransactWriteItemList": { "base": null, "refs": { - "TransactWriteItemsInput$TransactItems": "An ordered array of up to 25 TransactWriteItem
objects, each of which contains a ConditionCheck
, Put
, Update
, or Delete
object. These can operate on items in different tables, but the tables must reside in the same AWS account and Region, and no two of them can operate on the same item.
An ordered array of up to 25 TransactWriteItem
objects, each of which contains a ConditionCheck
, Put
, Update
, or Delete
object. These can operate on items in different tables, but the tables must reside in the same Amazon Web Services account and Region, and no two of them can operate on the same item.
The entire transaction request was canceled.
DynamoDB cancels a TransactWriteItems
request under the following circumstances:
A condition in one of the condition expressions is not met.
A table in the TransactWriteItems
request is in a different account or region.
More than one action in the TransactWriteItems
operation targets the same item.
There is insufficient provisioned capacity for the transaction to be completed.
An item size becomes too large (larger than 400 KB), or a local secondary index (LSI) becomes too large, or a similar validation error occurs because of changes made by the transaction.
There is a user error, such as an invalid data format.
DynamoDB cancels a TransactGetItems
request under the following circumstances:
There is an ongoing TransactGetItems
operation that conflicts with a concurrent PutItem
, UpdateItem
, DeleteItem
or TransactWriteItems
request. In this case the TransactGetItems
operation fails with a TransactionCanceledException
.
A table in the TransactGetItems
request is in a different account or region.
There is insufficient provisioned capacity for the transaction to be completed.
There is a user error, such as an invalid data format.
If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
property. This property is not set for other languages. Transaction cancellation reasons are ordered in the order of requested items, if an item has no error it will have NONE
code and Null
message.
Cancellation reason codes and possible error messages:
No Errors:
Code: NONE
Message: null
Conditional Check Failed:
Code: ConditionalCheckFailed
Message: The conditional request failed.
Item Collection Size Limit Exceeded:
Code: ItemCollectionSizeLimitExceeded
Message: Collection size exceeded.
Transaction Conflict:
Code: TransactionConflict
Message: Transaction is ongoing for the item.
Provisioned Throughput Exceeded:
Code: ProvisionedThroughputExceeded
Messages:
The level of configured provisioned throughput for the table was exceeded. Consider increasing your provisioning level with the UpdateTable API.
This Message is received when provisioned throughput is exceeded is on a provisioned DynamoDB table.
The level of configured provisioned throughput for one or more global secondary indexes of the table was exceeded. Consider increasing your provisioning level for the under-provisioned global secondary indexes with the UpdateTable API.
This message is returned when provisioned throughput is exceeded is on a provisioned GSI.
Throttling Error:
Code: ThrottlingError
Messages:
Throughput exceeds the current capacity of your table or index. DynamoDB is automatically scaling your table or index so please try again shortly. If exceptions persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
This message is returned when writes get throttled on an On-Demand table as DynamoDB is automatically scaling the table.
Throughput exceeds the current capacity for one or more global secondary indexes. DynamoDB is automatically scaling your index so please try again shortly.
This message is returned when when writes get throttled on an On-Demand GSI as DynamoDB is automatically scaling the GSI.
Validation Error:
Code: ValidationError
Messages:
One or more parameter values were invalid.
The update expression attempted to update the secondary index key beyond allowed size limits.
The update expression attempted to update the secondary index key to unsupported type.
An operand in the update expression has an incorrect data type.
Item size to update has exceeded the maximum allowed size.
Number overflow. Attempting to store a number with magnitude larger than supported range.
Type mismatch for attribute to update.
Nesting Levels have exceeded supported limits.
The document path provided in the update expression is invalid for update.
The provided expression refers to an attribute that does not exist in the item.
The entire transaction request was canceled.
DynamoDB cancels a TransactWriteItems
request under the following circumstances:
A condition in one of the condition expressions is not met.
A table in the TransactWriteItems
request is in a different account or region.
More than one action in the TransactWriteItems
operation targets the same item.
There is insufficient provisioned capacity for the transaction to be completed.
An item size becomes too large (larger than 400 KB), or a local secondary index (LSI) becomes too large, or a similar validation error occurs because of changes made by the transaction.
There is a user error, such as an invalid data format.
DynamoDB cancels a TransactGetItems
request under the following circumstances:
There is an ongoing TransactGetItems
operation that conflicts with a concurrent PutItem
, UpdateItem
, DeleteItem
or TransactWriteItems
request. In this case the TransactGetItems
operation fails with a TransactionCanceledException
.
A table in the TransactGetItems
request is in a different account or region.
There is insufficient provisioned capacity for the transaction to be completed.
There is a user error, such as an invalid data format.
If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
property. This property is not set for other languages. Transaction cancellation reasons are ordered in the order of requested items, if an item has no error it will have NONE
code and Null
message.
Cancellation reason codes and possible error messages:
No Errors:
Code: NONE
Message: null
Conditional Check Failed:
Code: ConditionalCheckFailed
Message: The conditional request failed.
Item Collection Size Limit Exceeded:
Code: ItemCollectionSizeLimitExceeded
Message: Collection size exceeded.
Transaction Conflict:
Code: TransactionConflict
Message: Transaction is ongoing for the item.
Provisioned Throughput Exceeded:
Code: ProvisionedThroughputExceeded
Messages:
The level of configured provisioned throughput for the table was exceeded. Consider increasing your provisioning level with the UpdateTable API.
This message is returned when provisioned throughput is exceeded on a provisioned DynamoDB table.
The level of configured provisioned throughput for one or more global secondary indexes of the table was exceeded. Consider increasing your provisioning level for the under-provisioned global secondary indexes with the UpdateTable API.
This message is returned when provisioned throughput is exceeded is on a provisioned GSI.
Throttling Error:
Code: ThrottlingError
Messages:
Throughput exceeds the current capacity of your table or index. DynamoDB is automatically scaling your table or index so please try again shortly. If exceptions persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
This message is returned when writes get throttled on an On-Demand table as DynamoDB is automatically scaling the table.
Throughput exceeds the current capacity for one or more global secondary indexes. DynamoDB is automatically scaling your index so please try again shortly.
This message is returned when when writes get throttled on an On-Demand GSI as DynamoDB is automatically scaling the GSI.
Validation Error:
Code: ValidationError
Messages:
One or more parameter values were invalid.
The update expression attempted to update the secondary index key beyond allowed size limits.
The update expression attempted to update the secondary index key to unsupported type.
An operand in the update expression has an incorrect data type.
Item size to update has exceeded the maximum allowed size.
Number overflow. Attempting to store a number with magnitude larger than supported range.
Type mismatch for attribute to update.
Nesting Levels have exceeded supported limits.
The document path provided in the update expression is invalid for update.
The provided expression refers to an attribute that does not exist in the item.
Indicates whether the private IPv4 address is the primary private IPv4 address. Only one IPv4 address can be designated as primary.
", "ProvisionByoipCidrRequest$PubliclyAdvertisable": "(IPv6 only) Indicate whether the address range will be publicly advertised to the internet.
Default: true
", "ProvisionByoipCidrRequest$DryRun": "Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Reserved.
", "PurchaseReservedInstancesOfferingRequest$DryRun": "Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
The valid number of cores that can be configured for the instance type.
" } }, + "CoreNetworkArn": { + "base": null, + "refs": { + "CreateRouteRequest$CoreNetworkArn": null, + "ReplaceRouteRequest$CoreNetworkArn": null, + "Route$CoreNetworkArn": null + } + }, "CpuManufacturer": { "base": null, "refs": { diff --git a/models/apis/translate/2017-07-01/api-2.json b/models/apis/translate/2017-07-01/api-2.json index c8433a4d5dd..ac71b453941 100644 --- a/models/apis/translate/2017-07-01/api-2.json +++ b/models/apis/translate/2017-07-01/api-2.json @@ -346,6 +346,13 @@ }, "exception":true }, + "Directionality":{ + "type":"string", + "enum":[ + "UNI", + "MULTI" + ] + }, "EncryptionKey":{ "type":"structure", "required":[ @@ -385,10 +392,7 @@ }, "GetTerminologyRequest":{ "type":"structure", - "required":[ - "Name", - "TerminologyDataFormat" - ], + "required":["Name"], "members":{ "Name":{"shape":"ResourceName"}, "TerminologyDataFormat":{"shape":"TerminologyDataFormat"} @@ -398,7 +402,8 @@ "type":"structure", "members":{ "TerminologyProperties":{"shape":"TerminologyProperties"}, - "TerminologyDataLocation":{"shape":"TerminologyDataLocation"} + "TerminologyDataLocation":{"shape":"TerminologyDataLocation"}, + "AuxiliaryDataLocation":{"shape":"TerminologyDataLocation"} } }, "IamRoleArn":{ @@ -425,7 +430,8 @@ "ImportTerminologyResponse":{ "type":"structure", "members":{ - "TerminologyProperties":{"shape":"TerminologyProperties"} + "TerminologyProperties":{"shape":"TerminologyProperties"}, + "AuxiliaryDataLocation":{"shape":"TerminologyDataLocation"} } }, "InputDataConfig":{ @@ -765,14 +771,16 @@ ], "members":{ "File":{"shape":"TerminologyFile"}, - "Format":{"shape":"TerminologyDataFormat"} + "Format":{"shape":"TerminologyDataFormat"}, + "Directionality":{"shape":"Directionality"} } }, "TerminologyDataFormat":{ "type":"string", "enum":[ "CSV", - "TMX" + "TMX", + "TSV" ] }, "TerminologyDataLocation":{ @@ -803,7 +811,11 @@ "SizeBytes":{"shape":"Integer"}, "TermCount":{"shape":"Integer"}, "CreatedAt":{"shape":"Timestamp"}, - "LastUpdatedAt":{"shape":"Timestamp"} + "LastUpdatedAt":{"shape":"Timestamp"}, + "Directionality":{"shape":"Directionality"}, + "Message":{"shape":"UnboundedLengthString"}, + "SkippedTermCount":{"shape":"Integer"}, + "Format":{"shape":"TerminologyDataFormat"} } }, "TerminologyPropertiesList":{ diff --git a/models/apis/translate/2017-07-01/docs-2.json b/models/apis/translate/2017-07-01/docs-2.json index 47d2bf018bf..08a542ffc03 100644 --- a/models/apis/translate/2017-07-01/docs-2.json +++ b/models/apis/translate/2017-07-01/docs-2.json @@ -110,6 +110,13 @@ "refs": { } }, + "Directionality": { + "base": null, + "refs": { + "TerminologyData$Directionality": "The directionality of your terminology resource indicates whether it has one source language (uni-directional) or multiple (multi-directional).
The terminology resource has one source language (for example, the first column in a CSV file), and all of its other languages are target languages.
Any language in the terminology resource can be the source language or a target language. A single multi-directional terminology resource can be used for jobs that translate different language pairs. For example, if the terminology contains terms in English and Spanish, then it can be used for jobs that translate English to Spanish and jobs that translate Spanish to English.
When you create a custom terminology resource without specifying the directionality, it behaves as uni-directional terminology, although this parameter will have a null value.
", + "TerminologyProperties$Directionality": "The directionality of your terminology resource indicates whether it has one source language (uni-directional) or multiple (multi-directional).
The terminology resource has one source language (the first column in a CSV file), and all of its other languages are target languages.
Any language in the terminology resource can be the source language.
The encryption key used to encrypt this object.
", "refs": { @@ -183,7 +190,8 @@ "JobDetails$DocumentsWithErrorsCount": "The number of documents that could not be processed during a translation job.
", "JobDetails$InputDocumentsCount": "The number of documents used as input in a translation job.
", "TerminologyProperties$SizeBytes": "The size of the file used when importing a custom terminology.
", - "TerminologyProperties$TermCount": "The number of terms included in the custom terminology.
" + "TerminologyProperties$TermCount": "The number of terms included in the custom terminology.
", + "TerminologyProperties$SkippedTermCount": "The number of terms in the input file that Amazon Translate skipped when you created or updated the terminology resource.
" } }, "InternalServerException": { @@ -261,7 +269,7 @@ "base": null, "refs": { "ParallelDataProperties$TargetLanguageCodes": "The language codes for the target languages available in the parallel data file. All possible target languages are returned as an array.
", - "TerminologyProperties$TargetLanguageCodes": "The language codes for the target languages available with the custom terminology file. All possible target languages are returned in array.
" + "TerminologyProperties$TargetLanguageCodes": "The language codes for the target languages available with the custom terminology resource. All possible target languages are returned in array.
" } }, "LimitExceededException": { @@ -518,14 +526,17 @@ "TerminologyDataFormat": { "base": null, "refs": { - "GetTerminologyRequest$TerminologyDataFormat": "The data format of the custom terminology being retrieved, either CSV or TMX.
", - "TerminologyData$Format": "The data format of the custom terminology. Either CSV or TMX.
" + "GetTerminologyRequest$TerminologyDataFormat": "The data format of the custom terminology being retrieved.
If you don't specify this parameter, Amazon Translate returns a file that has the same format as the file that was imported to create the terminology.
If you specify this parameter when you retrieve a multi-directional terminology resource, you must specify the same format as that of the input file that was imported to create it. Otherwise, Amazon Translate throws an error.
", + "TerminologyData$Format": "The data format of the custom terminology.
", + "TerminologyProperties$Format": "The format of the custom terminology input file.
" } }, "TerminologyDataLocation": { "base": "The location of the custom terminology data.
", "refs": { - "GetTerminologyResponse$TerminologyDataLocation": "The data location of the custom terminology being retrieved. The custom terminology file is returned in a presigned url that has a 30 minute expiration.
" + "GetTerminologyResponse$TerminologyDataLocation": "The data location of the custom terminology being retrieved. The custom terminology file is returned in a presigned url that has a 30 minute expiration.
", + "GetTerminologyResponse$AuxiliaryDataLocation": "The Amazon S3 location of a file that provides any errors or warnings that were produced by your input file. This file was created when Amazon Translate attempted to create a terminology resource. The location is returned as a presigned URL to that has a 30 minute expiration.
", + "ImportTerminologyResponse$AuxiliaryDataLocation": "The Amazon S3 location of a file that provides any errors or warnings that were produced by your input file. This file was created when Amazon Translate attempted to create a terminology resource. The location is returned as a presigned URL to that has a 30 minute expiration.
" } }, "TerminologyFile": { @@ -606,6 +617,7 @@ "base": null, "refs": { "ParallelDataProperties$Message": "Additional information from Amazon Translate about the parallel data resource.
", + "TerminologyProperties$Message": "Additional information from Amazon Translate about the terminology resource.
", "TextTranslationJobProperties$Message": "An explanation of any errors that may have occurred during the translation job.
" } }, diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index ded375725a2..2fe4d886cd5 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -11378,12 +11378,24 @@ "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, - "ca-central-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "fips.transcribe.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "fips.transcribe.ca-central-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -14836,6 +14848,12 @@ "partitionEndpoint" : "aws-us-gov-global" }, "identitystore" : { + "defaults" : { + "variants" : [ { + "hostname" : "identitystore.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, "endpoints" : { "fips-us-gov-west-1" : { "credentialScope" : { @@ -15444,6 +15462,12 @@ } }, "rds" : { + "defaults" : { + "variants" : [ { + "hostname" : "rds.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, "endpoints" : { "rds.us-gov-east-1" : { "credentialScope" : { @@ -15629,6 +15653,12 @@ } }, "runtime.sagemaker" : { + "defaults" : { + "variants" : [ { + "hostname" : "runtime.sagemaker.{region}.{dnsSuffix}", + "tags" : [ "fips" ] + } ] + }, "endpoints" : { "us-gov-west-1" : { "variants" : [ { diff --git a/service/dynamodb/api.go b/service/dynamodb/api.go index 5bd10269165..55ad551db5b 100644 --- a/service/dynamodb/api.go +++ b/service/dynamodb/api.go @@ -60,9 +60,12 @@ func (c *DynamoDB) BatchExecuteStatementRequest(input *BatchExecuteStatementInpu // BatchExecuteStatement API operation for Amazon DynamoDB. // -// This operation allows you to perform batch reads and writes on data stored +// This operation allows you to perform batch reads or writes on data stored // in DynamoDB, using PartiQL. // +// The entire batch must consist of either read statements or write statements, +// you cannot mix both in one batch. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -73,7 +76,7 @@ func (c *DynamoDB) BatchExecuteStatementRequest(input *BatchExecuteStatementInpu // Returned Error Types: // * RequestLimitExceeded // Throughput exceeds the current throughput quota for your account. Please -// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request // a quota increase. // // * InternalServerError @@ -235,11 +238,11 @@ func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.R // // Returned Error Types: // * ProvisionedThroughputExceededException -// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry -// requests that receive this exception. Your request is eventually successful, -// unless your retry queue is too large to finish. Reduce the frequency of requests -// and use exponential backoff. For more information, go to Error Retries and -// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// Your request rate is too high. 
The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) // in the Amazon DynamoDB Developer Guide. // // * ResourceNotFoundException @@ -248,7 +251,7 @@ func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.R // // * RequestLimitExceeded // Throughput exceeds the current throughput quota for your account. Please -// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request // a quota increase. // // * InternalServerError @@ -477,11 +480,11 @@ func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *reque // // Returned Error Types: // * ProvisionedThroughputExceededException -// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry -// requests that receive this exception. Your request is eventually successful, -// unless your retry queue is too large to finish. Reduce the frequency of requests -// and use exponential backoff. For more information, go to Error Retries and -// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) // in the Amazon DynamoDB Developer Guide. // // * ResourceNotFoundException @@ -494,7 +497,7 @@ func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *reque // // * RequestLimitExceeded // Throughput exceeds the current throughput quota for your account. Please -// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request // a quota increase. // // * InternalServerError @@ -919,9 +922,10 @@ func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Req // CreateTable API operation for Amazon DynamoDB. // -// The CreateTable operation adds a new table to your account. In an AWS account, -// table names must be unique within each Region. That is, you can have two -// tables with same name if you create the tables in different Regions. +// The CreateTable operation adds a new table to your account. In an Amazon +// Web Services account, table names must be unique within each Region. That +// is, you can have two tables with same name if you create the tables in different +// Regions. // // CreateTable is an asynchronous operation. Upon receiving a CreateTable request, // DynamoDB immediately returns a response with a TableStatus of CREATING. 
After @@ -1210,11 +1214,11 @@ func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Reque // A condition specified in the operation could not be evaluated. // // * ProvisionedThroughputExceededException -// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry -// requests that receive this exception. Your request is eventually successful, -// unless your retry queue is too large to finish. Reduce the frequency of requests -// and use exponential backoff. For more information, go to Error Retries and -// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) // in the Amazon DynamoDB Developer Guide. // // * ResourceNotFoundException @@ -1230,7 +1234,7 @@ func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Reque // // * RequestLimitExceeded // Throughput exceeds the current throughput quota for your account. Please -// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request // a quota increase. // // * InternalServerError @@ -2356,24 +2360,26 @@ func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *reque // DescribeLimits API operation for Amazon DynamoDB. // -// Returns the current provisioned-capacity quotas for your AWS account in a -// Region, both for the Region as a whole and for any one DynamoDB table that -// you create there. +// Returns the current provisioned-capacity quotas for your Amazon Web Services +// account in a Region, both for the Region as a whole and for any one DynamoDB +// table that you create there. // -// When you establish an AWS account, the account has initial quotas on the -// maximum read capacity units and write capacity units that you can provision -// across all of your DynamoDB tables in a given Region. Also, there are per-table -// quotas that apply when you create a table there. For more information, see -// Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) +// When you establish an Amazon Web Services account, the account has initial +// quotas on the maximum read capacity units and write capacity units that you +// can provision across all of your DynamoDB tables in a given Region. Also, +// there are per-table quotas that apply when you create a table there. For +// more information, see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) // page in the Amazon DynamoDB Developer Guide. // -// Although you can increase these quotas by filing a case at AWS Support Center -// (https://console.aws.amazon.com/support/home#/), obtaining the increase is -// not instantaneous. 
The DescribeLimits action lets you write code to compare -// the capacity you are currently using to those quotas imposed by your account -// so that you have enough time to apply for an increase before you hit a quota. +// Although you can increase these quotas by filing a case at Amazon Web Services +// Support Center (https://console.aws.amazon.com/support/home#/), obtaining +// the increase is not instantaneous. The DescribeLimits action lets you write +// code to compare the capacity you are currently using to those quotas imposed +// by your account so that you have enough time to apply for an increase before +// you hit a quota. // -// For example, you could use one of the AWS SDKs to do the following: +// For example, you could use one of the Amazon Web Services SDKs to do the +// following: // // Call DescribeLimits for a particular Region to obtain your current account // quotas on provisioned capacity there. @@ -3075,11 +3081,11 @@ func (c *DynamoDB) ExecuteStatementRequest(input *ExecuteStatementInput) (req *r // A condition specified in the operation could not be evaluated. // // * ProvisionedThroughputExceededException -// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry -// requests that receive this exception. Your request is eventually successful, -// unless your retry queue is too large to finish. Reduce the frequency of requests -// and use exponential backoff. For more information, go to Error Retries and -// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) // in the Amazon DynamoDB Developer Guide. // // * ResourceNotFoundException @@ -3095,7 +3101,7 @@ func (c *DynamoDB) ExecuteStatementRequest(input *ExecuteStatementInput) (req *r // // * RequestLimitExceeded // Throughput exceeds the current throughput quota for your account. Please -// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request // a quota increase. // // * InternalServerError @@ -3174,6 +3180,12 @@ func (c *DynamoDB) ExecuteTransactionRequest(input *ExecuteTransactionInput) (re // This operation allows you to perform transactional reads or writes on data // stored in DynamoDB, using PartiQL. // +// The entire transaction must consist of either read statements or write statements, +// you cannot mix both in one transaction. The EXISTS function is an exception +// and can be used to check the condition of specific attributes of the item +// in a similar manner to ConditionCheck in the TransactWriteItems (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/transaction-apis.html#transaction-apis-txwriteitems) +// API. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
@@ -3281,16 +3293,16 @@ func (c *DynamoDB) ExecuteTransactionRequest(input *ExecuteTransactionInput) (re // payload but with an idempotent token that was already used. // // * ProvisionedThroughputExceededException -// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry -// requests that receive this exception. Your request is eventually successful, -// unless your retry queue is too large to finish. Reduce the frequency of requests -// and use exponential backoff. For more information, go to Error Retries and -// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) // in the Amazon DynamoDB Developer Guide. // // * RequestLimitExceeded // Throughput exceeds the current throughput quota for your account. Please -// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request // a quota increase. // // * InternalServerError @@ -3513,11 +3525,11 @@ func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, ou // // Returned Error Types: // * ProvisionedThroughputExceededException -// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry -// requests that receive this exception. Your request is eventually successful, -// unless your retry queue is too large to finish. Reduce the frequency of requests -// and use exponential backoff. For more information, go to Error Retries and -// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) // in the Amazon DynamoDB Developer Guide. // // * ResourceNotFoundException @@ -3526,7 +3538,7 @@ func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, ou // // * RequestLimitExceeded // Throughput exceeds the current throughput quota for your account. Please -// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request // a quota increase. // // * InternalServerError @@ -3623,10 +3635,10 @@ func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Req // ListBackups API operation for Amazon DynamoDB. // -// List backups associated with an AWS account. To list backups for a given -// table, specify TableName. ListBackups returns a paginated list of results -// with at most 1 MB worth of items in a page. 
You can also specify a maximum -// number of entries to be returned in a page. +// List backups associated with an Amazon Web Services account. To list backups +// for a given table, specify TableName. ListBackups returns a paginated list +// of results with at most 1 MB worth of items in a page. You can also specify +// a maximum number of entries to be returned in a page. // // In the request, start time is inclusive, but end time is exclusive. Note // that these boundaries are for the time at which the original backup was requested. @@ -4421,26 +4433,26 @@ func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, ou // // This topic provides general information about the PutItem API. // -// For information on how to call the PutItem API using the AWS SDK in specific -// languages, see the following: +// For information on how to call the PutItem API using the Amazon Web Services +// SDK in specific languages, see the following: // -// * PutItem in the AWS Command Line Interface (http://docs.aws.amazon.com/goto/aws-cli/dynamodb-2012-08-10/PutItem) +// * PutItem in the Command Line Interface (http://docs.aws.amazon.com/goto/aws-cli/dynamodb-2012-08-10/PutItem) // -// * PutItem in the AWS SDK for .NET (http://docs.aws.amazon.com/goto/DotNetSDKV3/dynamodb-2012-08-10/PutItem) +// * PutItem in the SDK for .NET (http://docs.aws.amazon.com/goto/DotNetSDKV3/dynamodb-2012-08-10/PutItem) // -// * PutItem in the AWS SDK for C++ (http://docs.aws.amazon.com/goto/SdkForCpp/dynamodb-2012-08-10/PutItem) +// * PutItem in the SDK for C++ (http://docs.aws.amazon.com/goto/SdkForCpp/dynamodb-2012-08-10/PutItem) // -// * PutItem in the AWS SDK for Go (http://docs.aws.amazon.com/goto/SdkForGoV1/dynamodb-2012-08-10/PutItem) +// * PutItem in the SDK for Go (http://docs.aws.amazon.com/goto/SdkForGoV1/dynamodb-2012-08-10/PutItem) // -// * PutItem in the AWS SDK for Java (http://docs.aws.amazon.com/goto/SdkForJava/dynamodb-2012-08-10/PutItem) +// * PutItem in the SDK for Java (http://docs.aws.amazon.com/goto/SdkForJava/dynamodb-2012-08-10/PutItem) // -// * PutItem in the AWS SDK for JavaScript (http://docs.aws.amazon.com/goto/AWSJavaScriptSDK/dynamodb-2012-08-10/PutItem) +// * PutItem in the SDK for JavaScript (http://docs.aws.amazon.com/goto/AWSJavaScriptSDK/dynamodb-2012-08-10/PutItem) // -// * PutItem in the AWS SDK for PHP V3 (http://docs.aws.amazon.com/goto/SdkForPHPV3/dynamodb-2012-08-10/PutItem) +// * PutItem in the SDK for PHP V3 (http://docs.aws.amazon.com/goto/SdkForPHPV3/dynamodb-2012-08-10/PutItem) // -// * PutItem in the AWS SDK for Python (http://docs.aws.amazon.com/goto/boto3/dynamodb-2012-08-10/PutItem) +// * PutItem in the SDK for Python (Boto) (http://docs.aws.amazon.com/goto/boto3/dynamodb-2012-08-10/PutItem) // -// * PutItem in the AWS SDK for Ruby V2 (http://docs.aws.amazon.com/goto/SdkForRubyV2/dynamodb-2012-08-10/PutItem) +// * PutItem in the SDK for Ruby V2 (http://docs.aws.amazon.com/goto/SdkForRubyV2/dynamodb-2012-08-10/PutItem) // // When you add an item, the primary key attributes are the only required attributes. // Attribute values cannot be null. @@ -4474,11 +4486,11 @@ func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, ou // A condition specified in the operation could not be evaluated. // // * ProvisionedThroughputExceededException -// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry -// requests that receive this exception. 
Your request is eventually successful, -// unless your retry queue is too large to finish. Reduce the frequency of requests -// and use exponential backoff. For more information, go to Error Retries and -// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) // in the Amazon DynamoDB Developer Guide. // // * ResourceNotFoundException @@ -4494,7 +4506,7 @@ func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, ou // // * RequestLimitExceeded // Throughput exceeds the current throughput quota for your account. Please -// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request // a quota increase. // // * InternalServerError @@ -4597,9 +4609,10 @@ func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output // Query API operation for Amazon DynamoDB. // -// The Query operation finds items based on primary key values. You can query -// any table or secondary index that has a composite primary key (a partition -// key and a sort key). +// You must provide the name of the partition key attribute and a single value +// for that attribute. Query returns all items with that partition key value. +// Optionally, you can provide a sort key attribute and use a comparison operator +// to refine the search results. // // Use the KeyConditionExpression parameter to provide a specific value for // the partition key. The Query operation will return all of the items from @@ -4655,11 +4668,11 @@ func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output // // Returned Error Types: // * ProvisionedThroughputExceededException -// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry -// requests that receive this exception. Your request is eventually successful, -// unless your retry queue is too large to finish. Reduce the frequency of requests -// and use exponential backoff. For more information, go to Error Retries and -// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) // in the Amazon DynamoDB Developer Guide. // // * ResourceNotFoundException @@ -4668,7 +4681,7 @@ func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output // // * RequestLimitExceeded // Throughput exceeds the current throughput quota for your account. 
Please -// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request // a quota increase. // // * InternalServerError @@ -5179,11 +5192,11 @@ func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output * // // Returned Error Types: // * ProvisionedThroughputExceededException -// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry -// requests that receive this exception. Your request is eventually successful, -// unless your retry queue is too large to finish. Reduce the frequency of requests -// and use exponential backoff. For more information, go to Error Retries and -// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) // in the Amazon DynamoDB Developer Guide. // // * ResourceNotFoundException @@ -5192,7 +5205,7 @@ func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output * // // * RequestLimitExceeded // Throughput exceeds the current throughput quota for your account. Please -// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request // a quota increase. // // * InternalServerError @@ -5480,8 +5493,9 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r // and Region. A TransactGetItems call can contain up to 25 TransactGetItem // objects, each of which contains a Get structure that specifies an item to // retrieve from a table in the account and Region. A call to TransactGetItems -// cannot retrieve items from tables in more than one AWS account or Region. -// The aggregate size of the items in the transaction cannot exceed 4 MB. +// cannot retrieve items from tables in more than one Amazon Web Services account +// or Region. The aggregate size of the items in the transaction cannot exceed +// 4 MB. // // DynamoDB rejects the entire TransactGetItems request if any of the following // is true: @@ -5596,16 +5610,16 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r // the item. // // * ProvisionedThroughputExceededException -// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry -// requests that receive this exception. Your request is eventually successful, -// unless your retry queue is too large to finish. Reduce the frequency of requests -// and use exponential backoff. For more information, go to Error Retries and -// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. 
Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) // in the Amazon DynamoDB Developer Guide. // // * RequestLimitExceeded // Throughput exceeds the current throughput quota for your account. Please -// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request // a quota increase. // // * InternalServerError @@ -5704,10 +5718,10 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re // // TransactWriteItems is a synchronous write operation that groups up to 25 // action requests. These actions can target items in different tables, but -// not in different AWS accounts or Regions, and no two actions can target the -// same item. For example, you cannot both ConditionCheck and Update the same -// item. The aggregate size of the items in the transaction cannot exceed 4 -// MB. +// not in different Amazon Web Services accounts or Regions, and no two actions +// can target the same item. For example, you cannot both ConditionCheck and +// Update the same item. The aggregate size of the items in the transaction +// cannot exceed 4 MB. // // The actions are completed atomically so that either all of them succeed, // or all of them fail. They are defined by the following objects: @@ -5864,16 +5878,16 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re // payload but with an idempotent token that was already used. // // * ProvisionedThroughputExceededException -// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry -// requests that receive this exception. Your request is eventually successful, -// unless your retry queue is too large to finish. Reduce the frequency of requests -// and use exponential backoff. For more information, go to Error Retries and -// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) // in the Amazon DynamoDB Developer Guide. // // * RequestLimitExceeded // Throughput exceeds the current throughput quota for your account. Please -// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request // a quota increase. // // * InternalServerError @@ -6199,6 +6213,12 @@ func (c *DynamoDB) UpdateContributorInsightsRequest(input *UpdateContributorInsi // UpdateContributorInsights API operation for Amazon DynamoDB. // // Updates the status for contributor insights for a specific table or index. +// CloudWatch Contributor Insights for DynamoDB graphs display the partition +// key and (if applicable) sort key of frequently accessed items and frequently +// throttled items in plaintext. 
If you require the use of AWS Key Management +// Service (KMS) to encrypt this table’s partition key and sort key data with +// an AWS managed key or customer managed key, you should not enable CloudWatch +// Contributor Insights for DynamoDB for this table. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6595,11 +6615,11 @@ func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Reque // A condition specified in the operation could not be evaluated. // // * ProvisionedThroughputExceededException -// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry -// requests that receive this exception. Your request is eventually successful, -// unless your retry queue is too large to finish. Reduce the frequency of requests -// and use exponential backoff. For more information, go to Error Retries and -// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) // in the Amazon DynamoDB Developer Guide. // // * ResourceNotFoundException @@ -6615,7 +6635,7 @@ func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Reque // // * RequestLimitExceeded // Throughput exceeds the current throughput quota for your account. Please -// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request // a quota increase. // // * InternalServerError @@ -7063,8 +7083,8 @@ type ArchivalSummary struct { // is: // // * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The table was archived due to - // the table's AWS KMS key being inaccessible for more than seven days. An - // On-Demand backup was created at the archival time. + // the table's KMS key being inaccessible for more than seven days. An On-Demand + // backup was created at the archival time. ArchivalReason *string `type:"string"` } @@ -7958,7 +7978,7 @@ type BackupDetails struct { // no additional cost). System backups allow you to restore the deleted table // to the state it was in just before the point of deletion. // - // * AWS_BACKUP - On-demand backup created by you from AWS Backup service. + // * AWS_BACKUP - On-demand backup created by you from Backup service. // // BackupType is a required field BackupType *string `type:"string" required:"true" enum:"BackupType"` @@ -8185,7 +8205,7 @@ type BackupSummary struct { // no additional cost). System backups allow you to restore the deleted table // to the state it was in just before the point of deletion. // - // * AWS_BACKUP - On-demand backup created by you from AWS Backup service. + // * AWS_BACKUP - On-demand backup created by you from Backup service. BackupType *string `type:"string" enum:"BackupType"` // ARN associated with the table. 
@@ -10060,10 +10080,10 @@ type CreateReplicationGroupMemberAction struct { // Replica-specific global secondary index settings. GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndex `min:"1" type:"list"` - // The AWS KMS customer master key (CMK) that should be used for AWS KMS encryption - // in the new replica. To specify a CMK, use its key ID, Amazon Resource Name - // (ARN), alias name, or alias ARN. Note that you should only provide this parameter - // if the key is different from the default DynamoDB KMS master key alias/aws/dynamodb. + // The KMS key that should be used for KMS encryption in the new replica. To + // specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or + // alias ARN. Note that you should only provide this parameter if the key is + // different from the default DynamoDB KMS key alias/aws/dynamodb. KMSMasterKeyId *string `type:"string"` // Replica-specific provisioned throughput. If not specified, uses the source @@ -11472,13 +11492,13 @@ func (s *DescribeContributorInsightsInput) SetTableName(v string) *DescribeContr type DescribeContributorInsightsOutput struct { _ struct{} `type:"structure"` - // List of names of the associated Alpine rules. + // List of names of the associated contributor insights rules. ContributorInsightsRuleList []*string `type:"list"` - // Current Status contributor insights. + // Current status of contributor insights. ContributorInsightsStatus *string `type:"string" enum:"ContributorInsightsStatus"` - // Returns information about the last failure that encountered. + // Returns information about the last failure that was encountered. // // The most common exceptions for a FAILED status are: // @@ -12711,7 +12731,7 @@ type ExecuteStatementOutput struct { _ struct{} `type:"structure"` // If a read operation was used, this property will contain the result of the - // reade operation; a map of attribute names and their values. For the write + // read operation; a map of attribute names and their values. For the write // operations this value will be empty. Items []map[string]*AttributeValue `type:"list"` @@ -13190,7 +13210,8 @@ type ExportDescription struct { // The name of the Amazon S3 bucket containing the export. S3Bucket *string `type:"string"` - // The ID of the AWS account that owns the bucket containing the export. + // The ID of the Amazon Web Services account that owns the bucket containing + // the export. S3BucketOwner *string `type:"string"` // The Amazon S3 bucket prefix used as the file name and path of the exported @@ -13202,10 +13223,10 @@ type ExportDescription struct { // // * AES256 - server-side encryption with Amazon S3 managed keys // - // * KMS - server-side encryption with AWS KMS managed keys + // * KMS - server-side encryption with KMS managed keys S3SseAlgorithm *string `type:"string" enum:"S3SseAlgorithm"` - // The ID of the AWS KMS managed key used to encrypt the S3 bucket where export + // The ID of the KMS managed key used to encrypt the S3 bucket where export // data is stored (if applicable). S3SseKmsKeyId *string `min:"1" type:"string"` @@ -13487,8 +13508,8 @@ type ExportTableToPointInTimeInput struct { // S3Bucket is a required field S3Bucket *string `type:"string" required:"true"` - // The ID of the AWS account that owns the bucket the export will be stored - // in. + // The ID of the Amazon Web Services account that owns the bucket the export + // will be stored in. 
S3BucketOwner *string `type:"string"` // The Amazon S3 bucket prefix to use as the file name and path of the exported @@ -13500,10 +13521,10 @@ type ExportTableToPointInTimeInput struct { // // * AES256 - server-side encryption with Amazon S3 managed keys // - // * KMS - server-side encryption with AWS KMS managed keys + // * KMS - server-side encryption with KMS managed keys S3SseAlgorithm *string `type:"string" enum:"S3SseAlgorithm"` - // The ID of the AWS KMS managed key used to encrypt the S3 bucket where export + // The ID of the KMS managed key used to encrypt the S3 bucket where export // data will be stored (if applicable). S3SseKmsKeyId *string `min:"1" type:"string"` @@ -17078,11 +17099,11 @@ func (s *ProvisionedThroughputDescription) SetWriteCapacityUnits(v int64) *Provi return s } -// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry -// requests that receive this exception. Your request is eventually successful, -// unless your retry queue is too large to finish. Reduce the frequency of requests -// and use exponential backoff. For more information, go to Error Retries and -// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) // in the Amazon DynamoDB Developer Guide. type ProvisionedThroughputExceededException struct { _ struct{} `type:"structure"` @@ -17447,6 +17468,8 @@ type PutItemInput struct { // * ALL_OLD - If PutItem overwrote an attribute name-value pair, then the // content of the old item is returned. // + // The values returned are strongly consistent. + // // The ReturnValues parameter is used by several DynamoDB operations; however, // PutItem does not recognize any values other than NONE or ALL_OLD. ReturnValues *string `type:"string" enum:"ReturnValue"` @@ -18481,8 +18504,7 @@ type ReplicaDescription struct { // Replica-specific global secondary index settings. GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndexDescription `type:"list"` - // The AWS KMS customer master key (CMK) of the replica that will be used for - // AWS KMS encryption. + // The KMS key of the replica that will be used for KMS encryption. KMSMasterKeyId *string `type:"string"` // Replica-specific provisioned throughput. If not described, uses the source @@ -18506,17 +18528,17 @@ type ReplicaDescription struct { // // * ACTIVE - The replica is ready for use. // - // * REGION_DISABLED - The replica is inaccessible because the AWS Region - // has been disabled. If the AWS Region remains inaccessible for more than + // * REGION_DISABLED - The replica is inaccessible because the Amazon Web + // Services Region has been disabled. If the Amazon Web Services Region remains + // inaccessible for more than 20 hours, DynamoDB will remove this replica + // from the replication group. The replica will not be deleted and replication + // will stop from and to this region. + // + // * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the + // table is inaccessible. 
If the KMS key remains inaccessible for more than // 20 hours, DynamoDB will remove this replica from the replication group. // The replica will not be deleted and replication will stop from and to // this region. - // - // * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The AWS KMS key used to encrypt - // the table is inaccessible. If the AWS KMS key remains inaccessible for - // more than 20 hours, DynamoDB will remove this replica from the replication - // group. The replica will not be deleted and replication will stop from - // and to this region. ReplicaStatus *string `type:"string" enum:"ReplicaStatus"` // Detailed information about the replica status. @@ -19431,7 +19453,7 @@ func (s *ReplicationGroupUpdate) SetUpdate(v *UpdateReplicationGroupMemberAction } // Throughput exceeds the current throughput quota for your account. Please -// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request // a quota increase. type RequestLimitExceeded struct { _ struct{} `type:"structure"` @@ -20063,20 +20085,19 @@ type SSEDescription struct { _ struct{} `type:"structure"` // Indicates the time, in UNIX epoch date format, when DynamoDB detected that - // the table's AWS KMS key was inaccessible. This attribute will automatically - // be cleared when DynamoDB detects that the table's AWS KMS key is accessible - // again. DynamoDB will initiate the table archival process when table's AWS - // KMS key remains inaccessible for more than seven days from this date. + // the table's KMS key was inaccessible. This attribute will automatically be + // cleared when DynamoDB detects that the table's KMS key is accessible again. + // DynamoDB will initiate the table archival process when the table's KMS key + // remains inaccessible for more than seven days from this date. InaccessibleEncryptionDateTime *time.Time `type:"timestamp"` - // The AWS KMS customer master key (CMK) ARN used for the AWS KMS encryption. + // The KMS key ARN used for the KMS encryption. KMSMasterKeyArn *string `type:"string"` // Server-side encryption type. The only supported value is: // - // * KMS - Server-side encryption that uses AWS Key Management Service. The - // key is stored in your account and is managed by AWS KMS (AWS KMS charges - // apply). + // * KMS - Server-side encryption that uses Key Management Service. The key + // is stored in your account and is managed by KMS (KMS charges apply). SSEType *string `type:"string" enum:"SSEType"` // Represents the current state of server-side encryption. The only supported @@ -20134,23 +20155,23 @@ func (s *SSEDescription) SetStatus(v string) *SSEDescription { type SSESpecification struct { _ struct{} `type:"structure"` - // Indicates whether server-side encryption is done using an AWS managed CMK - // or an AWS owned CMK. If enabled (true), server-side encryption type is set - // to KMS and an AWS managed CMK is used (AWS KMS charges apply). If disabled - // (false) or not specified, server-side encryption is set to AWS owned CMK. + // Indicates whether server-side encryption is done using an Amazon Web Services + // managed key or an Amazon Web Services owned key. If enabled (true), server-side + // encryption type is set to KMS and an Amazon Web Services managed key is used + // (KMS charges apply). If disabled (false) or not specified, server-side encryption + // is set to Amazon Web Services owned key. 
Enabled *bool `type:"boolean"` - // The AWS KMS customer master key (CMK) that should be used for the AWS KMS - // encryption. To specify a CMK, use its key ID, Amazon Resource Name (ARN), - // alias name, or alias ARN. Note that you should only provide this parameter - // if the key is different from the default DynamoDB customer master key alias/aws/dynamodb. + // The KMS key that should be used for the KMS encryption. To specify a key, + // use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note + // that you should only provide this parameter if the key is different from + // the default DynamoDB key alias/aws/dynamodb. KMSMasterKeyId *string `type:"string"` // Server-side encryption type. The only supported value is: // - // * KMS - Server-side encryption that uses AWS Key Management Service. The - // key is stored in your account and is managed by AWS KMS (AWS KMS charges - // apply). + // * KMS - Server-side encryption that uses Key Management Service. The key + // is stored in your account and is managed by KMS (KMS charges apply). SSEType *string `type:"string" enum:"SSEType"` } @@ -21145,7 +21166,7 @@ type TableDescription struct { GlobalSecondaryIndexes []*GlobalSecondaryIndexDescription `type:"list"` // Represents the version of global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html) - // in use, if the table is replicated across AWS Regions. + // in use, if the table is replicated across Amazon Web Services Regions. GlobalTableVersion *string `type:"string"` // The number of items in the specified table. DynamoDB updates this value approximately @@ -21180,7 +21201,7 @@ type TableDescription struct { // However, the combination of the following three elements is guaranteed to // be unique: // - // * AWS customer ID + // * Amazon Web Services customer ID // // * Table name // @@ -21265,10 +21286,10 @@ type TableDescription struct { // // * ACTIVE - The table is ready for use. // - // * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The AWS KMS key used to encrypt - // the table in inaccessible. Table operations may fail due to failure to - // use the AWS KMS key. DynamoDB will initiate the table archival process - // when a table's AWS KMS key remains inaccessible for more than seven days. + // * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the + // table is inaccessible. Table operations may fail due to failure to use + // the KMS key. DynamoDB will initiate the table archival process when a + // table's KMS key remains inaccessible for more than seven days. // // * ARCHIVING - The table is being archived. Operations are not allowed // until archival is complete. @@ -21554,10 +21575,11 @@ func (s *TableNotFoundException) RequestID() string { // Describes a tag. A tag is a key-value pair. You can add up to 50 tags to // a single DynamoDB table. // -// AWS-assigned tag names and values are automatically assigned the aws: prefix, -// which the user cannot assign. AWS-assigned tag names do not count towards -// the tag limit of 50. User-assigned tag names have the prefix user: in the -// Cost Allocation Report. You cannot backdate the application of a tag. +// Amazon Web Services-assigned tag names and values are automatically assigned +// the aws: prefix, which the user cannot assign. Amazon Web Services-assigned +// tag names do not count towards the tag limit of 50. User-assigned tag names +// have the prefix user: in the Cost Allocation Report. You cannot backdate +// the application of a tag. 
// // For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) // in the Amazon DynamoDB Developer Guide. @@ -22144,8 +22166,8 @@ type TransactWriteItemsInput struct { // An ordered array of up to 25 TransactWriteItem objects, each of which contains // a ConditionCheck, Put, Update, or Delete object. These can operate on items - // in different tables, but the tables must reside in the same AWS account and - // Region, and no two of them can operate on the same item. + // in different tables, but the tables must reside in the same Amazon Web Services + // account and Region, and no two of them can operate on the same item. // // TransactItems is a required field TransactItems []*TransactWriteItem `min:"1" type:"list" required:"true"` @@ -23739,11 +23761,10 @@ type UpdateReplicationGroupMemberAction struct { // Replica-specific global secondary index settings. GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndex `min:"1" type:"list"` - // The AWS KMS customer master key (CMK) of the replica that should be used - // for AWS KMS encryption. To specify a CMK, use its key ID, Amazon Resource - // Name (ARN), alias name, or alias ARN. Note that you should only provide this - // parameter if the key is different from the default DynamoDB KMS master key - // alias/aws/dynamodb. + // The KMS key of the replica that should be used for KMS encryption. To specify + // a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. + // Note that you should only provide this parameter if the key is different + // from the default DynamoDB KMS key alias/aws/dynamodb. KMSMasterKeyId *string `type:"string"` // Replica-specific provisioned throughput. If not specified, uses the source diff --git a/service/dynamodb/doc.go b/service/dynamodb/doc.go index f244a7330ea..c1fe4497832 100644 --- a/service/dynamodb/doc.go +++ b/service/dynamodb/doc.go @@ -12,15 +12,15 @@ // With DynamoDB, you can create database tables that can store and retrieve // any amount of data, and serve any level of request traffic. You can scale // up or scale down your tables' throughput capacity without downtime or performance -// degradation, and use the AWS Management Console to monitor resource utilization -// and performance metrics. +// degradation, and use the Amazon Web Services Management Console to monitor +// resource utilization and performance metrics. // // DynamoDB automatically spreads the data and traffic for your tables over // a sufficient number of servers to handle your throughput and storage requirements, // while maintaining consistent and fast performance. All of your data is stored // on solid state disks (SSDs) and automatically replicated across multiple -// Availability Zones in an AWS region, providing built-in high availability -// and data durability. +// Availability Zones in an Amazon Web Services Region, providing built-in high +// availability and data durability. // // See https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10 for more information on this service. // diff --git a/service/dynamodb/errors.go b/service/dynamodb/errors.go index 517229dee89..9ffd8f2d0d1 100644 --- a/service/dynamodb/errors.go +++ b/service/dynamodb/errors.go @@ -129,11 +129,11 @@ const ( // ErrCodeProvisionedThroughputExceededException for service response error code // "ProvisionedThroughputExceededException". // - // Your request rate is too high. 
The AWS SDKs for DynamoDB automatically retry - // requests that receive this exception. Your request is eventually successful, - // unless your retry queue is too large to finish. Reduce the frequency of requests - // and use exponential backoff. For more information, go to Error Retries and - // Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) + // Your request rate is too high. The Amazon Web Services SDKs for DynamoDB + // automatically retry requests that receive this exception. Your request is + // eventually successful, unless your retry queue is too large to finish. Reduce + // the frequency of requests and use exponential backoff. For more information, + // go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) // in the Amazon DynamoDB Developer Guide. ErrCodeProvisionedThroughputExceededException = "ProvisionedThroughputExceededException" @@ -153,7 +153,7 @@ const ( // "RequestLimitExceeded". // // Throughput exceeds the current throughput quota for your account. Please - // contact AWS Support at AWS Support (https://aws.amazon.com/support) to request + // contact Amazon Web Services Support (https://aws.amazon.com/support) to request // a quota increase. ErrCodeRequestLimitExceeded = "RequestLimitExceeded" diff --git a/service/ec2/api.go b/service/ec2/api.go index aa7df2dfc8f..313c353e39e 100644 --- a/service/ec2/api.go +++ b/service/ec2/api.go @@ -59987,6 +59987,8 @@ type CreateRouteInput struct { // with a Wavelength Zone. CarrierGatewayId *string `type:"string"` + CoreNetworkArn *string `type:"string"` + // The IPv4 CIDR address block used for the destination match. Routing decisions // are based on the most specific match. We modify the specified CIDR block // to its canonical form; for example, if you specify 100.68.0.18/18, we modify @@ -60078,6 +60080,12 @@ func (s *CreateRouteInput) SetCarrierGatewayId(v string) *CreateRouteInput { return s } +// SetCoreNetworkArn sets the CoreNetworkArn field's value. +func (s *CreateRouteInput) SetCoreNetworkArn(v string) *CreateRouteInput { + s.CoreNetworkArn = &v + return s +} + // SetDestinationCidrBlock sets the DestinationCidrBlock field's value. func (s *CreateRouteInput) SetDestinationCidrBlock(v string) *CreateRouteInput { s.DestinationCidrBlock = &v @@ -126133,6 +126141,8 @@ type ReplaceRouteInput struct { // [IPv4 traffic only] The ID of a carrier gateway. CarrierGatewayId *string `type:"string"` + CoreNetworkArn *string `type:"string"` + // The IPv4 CIDR address block used for the destination match. The value that // you provide must match the CIDR of an existing route in the table. DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` @@ -126223,6 +126233,12 @@ func (s *ReplaceRouteInput) SetCarrierGatewayId(v string) *ReplaceRouteInput { return s } +// SetCoreNetworkArn sets the CoreNetworkArn field's value. +func (s *ReplaceRouteInput) SetCoreNetworkArn(v string) *ReplaceRouteInput { + s.CoreNetworkArn = &v + return s +} + // SetDestinationCidrBlock sets the DestinationCidrBlock field's value. func (s *ReplaceRouteInput) SetDestinationCidrBlock(v string) *ReplaceRouteInput { s.DestinationCidrBlock = &v @@ -130354,6 +130370,8 @@ type Route struct { // The ID of the carrier gateway. 
CarrierGatewayId *string `locationName:"carrierGatewayId" type:"string"` + CoreNetworkArn *string `locationName:"coreNetworkArn" type:"string"` + // The IPv4 CIDR block used for the destination match. DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` @@ -130430,6 +130448,12 @@ func (s *Route) SetCarrierGatewayId(v string) *Route { return s } +// SetCoreNetworkArn sets the CoreNetworkArn field's value. +func (s *Route) SetCoreNetworkArn(v string) *Route { + s.CoreNetworkArn = &v + return s +} + // SetDestinationCidrBlock sets the DestinationCidrBlock field's value. func (s *Route) SetDestinationCidrBlock(v string) *Route { s.DestinationCidrBlock = &v return s } diff --git a/service/translate/api.go b/service/translate/api.go index cfb82f4d117..b880ac6cd16 100644 --- a/service/translate/api.go +++ b/service/translate/api.go @@ -2372,11 +2372,15 @@ type GetTerminologyInput struct { // Name is a required field Name *string `min:"1" type:"string" required:"true"` - // The data format of the custom terminology being retrieved, either CSV or - // TMX. + // The data format of the custom terminology being retrieved. // - // TerminologyDataFormat is a required field - TerminologyDataFormat *string `type:"string" required:"true" enum:"TerminologyDataFormat"` + // If you don't specify this parameter, Amazon Translate returns a file that + // has the same format as the file that was imported to create the terminology. + // + // If you specify this parameter when you retrieve a multi-directional terminology + // resource, you must specify the same format as that of the input file that + // was imported to create it. Otherwise, Amazon Translate throws an error. + TerminologyDataFormat *string `type:"string" enum:"TerminologyDataFormat"` } // String returns the string representation. @@ -2406,9 +2410,6 @@ func (s *GetTerminologyInput) Validate() error { if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.TerminologyDataFormat == nil { - invalidParams.Add(request.NewErrParamRequired("TerminologyDataFormat")) - } if invalidParams.Len() > 0 { return invalidParams @@ -2431,6 +2432,12 @@ func (s *GetTerminologyInput) SetTerminologyDataFormat(v string) *GetTerminology type GetTerminologyOutput struct { _ struct{} `type:"structure"` + // The Amazon S3 location of a file that provides any errors or warnings that + // were produced by your input file. This file was created when Amazon Translate + // attempted to create a terminology resource. The location is returned as a + // presigned URL that has a 30 minute expiration. + AuxiliaryDataLocation *TerminologyDataLocation `type:"structure"` + // The data location of the custom terminology being retrieved. The custom terminology // file is returned in a presigned url that has a 30 minute expiration. TerminologyDataLocation *TerminologyDataLocation `type:"structure"` @@ -2457,6 +2464,12 @@ func (s GetTerminologyOutput) GoString() string { return s.String() } +// SetAuxiliaryDataLocation sets the AuxiliaryDataLocation field's value. +func (s *GetTerminologyOutput) SetAuxiliaryDataLocation(v *TerminologyDataLocation) *GetTerminologyOutput { + s.AuxiliaryDataLocation = v + return s +} + // SetTerminologyDataLocation sets the TerminologyDataLocation field's value. 
func (s *GetTerminologyOutput) SetTerminologyDataLocation(v *TerminologyDataLocation) *GetTerminologyOutput { s.TerminologyDataLocation = v @@ -2579,6 +2592,12 @@ func (s *ImportTerminologyInput) SetTerminologyData(v *TerminologyData) *ImportT type ImportTerminologyOutput struct { _ struct{} `type:"structure"` + // The Amazon S3 location of a file that provides any errors or warnings that + // were produced by your input file. This file was created when Amazon Translate + // attempted to create a terminology resource. The location is returned as a + // presigned URL that has a 30 minute expiration. + AuxiliaryDataLocation *TerminologyDataLocation `type:"structure"` + // The properties of the custom terminology being imported. TerminologyProperties *TerminologyProperties `type:"structure"` } @@ -2601,6 +2620,12 @@ func (s ImportTerminologyOutput) GoString() string { return s.String() } +// SetAuxiliaryDataLocation sets the AuxiliaryDataLocation field's value. +func (s *ImportTerminologyOutput) SetAuxiliaryDataLocation(v *TerminologyDataLocation) *ImportTerminologyOutput { + s.AuxiliaryDataLocation = v + return s +} + // SetTerminologyProperties sets the TerminologyProperties field's value. func (s *ImportTerminologyOutput) SetTerminologyProperties(v *TerminologyProperties) *ImportTerminologyOutput { s.TerminologyProperties = v @@ -4258,6 +4283,27 @@ func (s *Term) SetTargetText(v string) *Term { type TerminologyData struct { _ struct{} `type:"structure"` + // The directionality of your terminology resource indicates whether it has + // one source language (uni-directional) or multiple (multi-directional). + // + // UNI + // + // The terminology resource has one source language (for example, the first + // column in a CSV file), and all of its other languages are target languages. + // + // MULTI + // + // Any language in the terminology resource can be the source language or a + // target language. A single multi-directional terminology resource can be used + // for jobs that translate different language pairs. For example, if the terminology + // contains terms in English and Spanish, then it can be used for jobs that + // translate English to Spanish and jobs that translate Spanish to English. + // + // When you create a custom terminology resource without specifying the directionality, + // it behaves as uni-directional terminology, although this parameter will have + // a null value. + Directionality *string `type:"string" enum:"Directionality"` + // The file containing the custom terminology data. Your version of the AWS // SDK performs a Base64-encoding on this field before sending a request to // the AWS service. Users of the SDK should not perform Base64-encoding themselves. // // File is a required field File []byte `type:"blob" required:"true" sensitive:"true"` - // The data format of the custom terminology. Either CSV or TMX. + // The data format of the custom terminology. // // Format is a required field Format *string `type:"string" required:"true" enum:"TerminologyDataFormat"` @@ -4311,6 +4357,12 @@ func (s *TerminologyData) Validate() error { return nil } +// SetDirectionality sets the Directionality field's value. +func (s *TerminologyData) SetDirectionality(v string) *TerminologyData { + s.Directionality = &v + return s +} + // SetFile sets the File field's value. 
func (s *TerminologyData) SetFile(v []byte) *TerminologyData { s.File = v @@ -4381,24 +4433,47 @@ type TerminologyProperties struct { // The description of the custom terminology properties. Description *string `type:"string"` + // The directionality of your terminology resource indicates whether it has + // one source language (uni-directional) or multiple (multi-directional). + // + // UNI + // + // The terminology resource has one source language (the first column in a CSV + // file), and all of its other languages are target languages. + // + // MULTI + // + // Any language in the terminology resource can be the source language. + Directionality *string `type:"string" enum:"Directionality"` + // The encryption key for the custom terminology. EncryptionKey *EncryptionKey `type:"structure"` + // The format of the custom terminology input file. + Format *string `type:"string" enum:"TerminologyDataFormat"` + // The time at which the custom terminology was last updated, based on the timestamp. LastUpdatedAt *time.Time `type:"timestamp"` + // Additional information from Amazon Translate about the terminology resource. + Message *string `type:"string"` + // The name of the custom terminology. Name *string `min:"1" type:"string"` // The size of the file used when importing a custom terminology. SizeBytes *int64 `type:"integer"` + // The number of terms in the input file that Amazon Translate skipped when + // you created or updated the terminology resource. + SkippedTermCount *int64 `type:"integer"` + // The language code for the source text of the translation request for which // the custom terminology is being used. SourceLanguageCode *string `min:"2" type:"string"` // The language codes for the target languages available with the custom terminology - // file. All possible target languages are returned in array. + // resource. All possible target languages are returned in an array. TargetLanguageCodes []*string `type:"list"` // The number of terms included in the custom terminology. @@ -4441,18 +4516,36 @@ func (s *TerminologyProperties) SetDescription(v string) *TerminologyProperties return s } +// SetDirectionality sets the Directionality field's value. +func (s *TerminologyProperties) SetDirectionality(v string) *TerminologyProperties { + s.Directionality = &v + return s +} + // SetEncryptionKey sets the EncryptionKey field's value. func (s *TerminologyProperties) SetEncryptionKey(v *EncryptionKey) *TerminologyProperties { s.EncryptionKey = v return s } +// SetFormat sets the Format field's value. +func (s *TerminologyProperties) SetFormat(v string) *TerminologyProperties { + s.Format = &v + return s +} + // SetLastUpdatedAt sets the LastUpdatedAt field's value. func (s *TerminologyProperties) SetLastUpdatedAt(v time.Time) *TerminologyProperties { s.LastUpdatedAt = &v return s } +// SetMessage sets the Message field's value. +func (s *TerminologyProperties) SetMessage(v string) *TerminologyProperties { + s.Message = &v + return s +} + // SetName sets the Name field's value. func (s *TerminologyProperties) SetName(v string) *TerminologyProperties { s.Name = &v @@ -4465,6 +4558,12 @@ func (s *TerminologyProperties) SetSizeBytes(v int64) *TerminologyProperties { return s } +// SetSkippedTermCount sets the SkippedTermCount field's value. +func (s *TerminologyProperties) SetSkippedTermCount(v int64) *TerminologyProperties { + s.SkippedTermCount = &v + return s +} + // SetSourceLanguageCode sets the SourceLanguageCode field's value. 
func (s *TerminologyProperties) SetSourceLanguageCode(v string) *TerminologyProperties { s.SourceLanguageCode = &v @@ -5237,6 +5336,22 @@ func (s *UpdateParallelDataOutput) SetStatus(v string) *UpdateParallelDataOutput return s } +const ( + // DirectionalityUni is a Directionality enum value + DirectionalityUni = "UNI" + + // DirectionalityMulti is a Directionality enum value + DirectionalityMulti = "MULTI" +) + +// Directionality_Values returns all elements of the Directionality enum +func Directionality_Values() []string { + return []string{ + DirectionalityUni, + DirectionalityMulti, + } +} + const ( // EncryptionKeyTypeKms is a EncryptionKeyType enum value EncryptionKeyTypeKms = "KMS" @@ -5351,6 +5466,9 @@ const ( // TerminologyDataFormatTmx is a TerminologyDataFormat enum value TerminologyDataFormatTmx = "TMX" + + // TerminologyDataFormatTsv is a TerminologyDataFormat enum value + TerminologyDataFormatTsv = "TSV" ) // TerminologyDataFormat_Values returns all elements of the TerminologyDataFormat enum @@ -5358,5 +5476,6 @@ func TerminologyDataFormat_Values() []string { return []string{ TerminologyDataFormatCsv, TerminologyDataFormatTmx, + TerminologyDataFormatTsv, } }
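Taken together, the Translate changes above introduce multi-directional terminologies (the new Directionality field and enum), a TSV data format, and auxiliary warning output on import and retrieval. A minimal sketch of importing a multi-directional CSV terminology with aws-sdk-go v1; the file path and resource name are hypothetical:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/translate"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"), // hypothetical region
	}))
	svc := translate.New(sess)

	// With MULTI, any column of the CSV can act as the source language.
	data, err := os.ReadFile("terminology.csv") // hypothetical file
	if err != nil {
		log.Fatal(err)
	}

	out, err := svc.ImportTerminology(&translate.ImportTerminologyInput{
		Name:          aws.String("my-terms"), // hypothetical name
		MergeStrategy: aws.String(translate.MergeStrategyOverwrite),
		TerminologyData: &translate.TerminologyData{
			File:           data, // the SDK Base64-encodes this field itself
			Format:         aws.String(translate.TerminologyDataFormatCsv),
			Directionality: aws.String(translate.DirectionalityMulti),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// New in this release: a skipped-term count plus a presigned URL
	// (30 minute expiration) for any warnings produced from the input file.
	if p := out.TerminologyProperties; p != nil && p.SkippedTermCount != nil {
		fmt.Println("skipped terms:", *p.SkippedTermCount)
	}
	if a := out.AuxiliaryDataLocation; a != nil && a.Location != nil {
		fmt.Println("warnings file:", *a.Location)
	}
}
```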
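The EC2 change in this release is narrower: CreateRouteInput, ReplaceRouteInput, and Route gain a CoreNetworkArn field, the additional VPC route target called out in the changelog. A hedged sketch of creating a route that targets a core network; the route table ID, CIDR block, and ARN below are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"), // hypothetical region
	}))
	svc := ec2.New(sess)

	// Route traffic for 10.1.0.0/16 to a core network instead of a gateway
	// or network interface. All identifiers below are placeholders.
	_, err := svc.CreateRoute(&ec2.CreateRouteInput{
		RouteTableId:         aws.String("rtb-0123456789abcdef0"),
		DestinationCidrBlock: aws.String("10.1.0.0/16"),
		CoreNetworkArn:       aws.String("arn:aws:networkmanager::123456789012:core-network/core-network-0example"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("route created")
}
```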