diff --git a/clients/client-accessanalyzer/src/endpoints.ts b/clients/client-accessanalyzer/src/endpoints.ts index 8a7f44a40ddb4..b6df9a040b4f5 100644 --- a/clients/client-accessanalyzer/src/endpoints.ts +++ b/clients/client-accessanalyzer/src/endpoints.ts @@ -162,6 +162,10 @@ const partitionHash: PartitionHash = { hostname: "access-analyzer.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "access-analyzer-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -172,6 +176,10 @@ const partitionHash: PartitionHash = { hostname: "access-analyzer.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "access-analyzer-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-account/src/endpoints.ts b/clients/client-account/src/endpoints.ts index b6aeaa257e7f4..53371d440b753 100644 --- a/clients/client-account/src/endpoints.ts +++ b/clients/client-account/src/endpoints.ts @@ -100,6 +100,10 @@ const partitionHash: PartitionHash = { hostname: "account.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "account-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -110,6 +114,10 @@ const partitionHash: PartitionHash = { hostname: "account.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "account-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-acm-pca/src/endpoints.ts b/clients/client-acm-pca/src/endpoints.ts index 33bfa6eaf6368..01a36730f6fdb 100644 --- a/clients/client-acm-pca/src/endpoints.ts +++ b/clients/client-acm-pca/src/endpoints.ts @@ -168,6 +168,10 @@ const partitionHash: PartitionHash = { hostname: "acm-pca.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "acm-pca-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -178,6 +182,10 @@ const partitionHash: PartitionHash = { hostname: "acm-pca.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "acm-pca-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-acm/src/endpoints.ts b/clients/client-acm/src/endpoints.ts index 410e2de2c4557..2928c2f103c0f 100644 --- a/clients/client-acm/src/endpoints.ts +++ b/clients/client-acm/src/endpoints.ts @@ -162,6 +162,10 @@ const partitionHash: PartitionHash = { hostname: "acm.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "acm-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -172,6 +176,10 @@ const partitionHash: PartitionHash = { hostname: "acm.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "acm-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-alexa-for-business/src/endpoints.ts b/clients/client-alexa-for-business/src/endpoints.ts index f7cf538e5ff8b..ca011526d7916 100644 --- a/clients/client-alexa-for-business/src/endpoints.ts +++ b/clients/client-alexa-for-business/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "a4b.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "a4b-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "a4b.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "a4b-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-amp/src/endpoints.ts b/clients/client-amp/src/endpoints.ts index 0148864ecce6b..b6c2de169c9c0 100644 --- a/clients/client-amp/src/endpoints.ts +++ b/clients/client-amp/src/endpoints.ts @@ -78,6 
+78,10 @@ const partitionHash: PartitionHash = { hostname: "aps.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "aps-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "aps.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "aps-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-amplify/src/endpoints.ts b/clients/client-amplify/src/endpoints.ts index 1f4ef6d07d586..d5255c75171df 100644 --- a/clients/client-amplify/src/endpoints.ts +++ b/clients/client-amplify/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "amplify.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "amplify-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "amplify.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "amplify-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-amplifybackend/src/endpoints.ts b/clients/client-amplifybackend/src/endpoints.ts index 94d3af5fea34e..87a2fdf8c0d39 100644 --- a/clients/client-amplifybackend/src/endpoints.ts +++ b/clients/client-amplifybackend/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "amplifybackend.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "amplifybackend-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "amplifybackend.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "amplifybackend-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-api-gateway/src/endpoints.ts b/clients/client-api-gateway/src/endpoints.ts index cbb56c1dabbad..b1fd36857c963 100644 --- a/clients/client-api-gateway/src/endpoints.ts +++ b/clients/client-api-gateway/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "apigateway.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "apigateway-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "apigateway.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "apigateway-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-apigatewaymanagementapi/src/endpoints.ts b/clients/client-apigatewaymanagementapi/src/endpoints.ts index 2089a92d373fa..265b2f5826553 100644 --- a/clients/client-apigatewaymanagementapi/src/endpoints.ts +++ b/clients/client-apigatewaymanagementapi/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "execute-api.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "execute-api-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "execute-api.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "execute-api-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-apigatewayv2/src/endpoints.ts b/clients/client-apigatewayv2/src/endpoints.ts index cbb56c1dabbad..b1fd36857c963 100644 --- a/clients/client-apigatewayv2/src/endpoints.ts +++ b/clients/client-apigatewayv2/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "apigateway.{region}.c2s.ic.gov", tags: [], }, + { + hostname: 
"apigateway-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "apigateway.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "apigateway-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-app-mesh/src/endpoints.ts b/clients/client-app-mesh/src/endpoints.ts index 3e95275914af2..9ff019b55eeb4 100644 --- a/clients/client-app-mesh/src/endpoints.ts +++ b/clients/client-app-mesh/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "appmesh.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "appmesh-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "appmesh.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "appmesh-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-appconfig/src/endpoints.ts b/clients/client-appconfig/src/endpoints.ts index 2d665fd5e36cf..6f747c5bce871 100644 --- a/clients/client-appconfig/src/endpoints.ts +++ b/clients/client-appconfig/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "appconfig.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "appconfig-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "appconfig.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "appconfig-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-appflow/src/endpoints.ts b/clients/client-appflow/src/endpoints.ts index cf2246534886d..9304b6a8933fa 100644 --- a/clients/client-appflow/src/endpoints.ts +++ b/clients/client-appflow/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "appflow.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "appflow-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "appflow.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "appflow-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-appintegrations/src/endpoints.ts b/clients/client-appintegrations/src/endpoints.ts index 85773cbd7a345..20a9bbec08039 100644 --- a/clients/client-appintegrations/src/endpoints.ts +++ b/clients/client-appintegrations/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "app-integrations.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "app-integrations-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "app-integrations.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "app-integrations-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-application-auto-scaling/src/endpoints.ts b/clients/client-application-auto-scaling/src/endpoints.ts index 66db4fad79c87..3e6b3de3ed6c1 100644 --- a/clients/client-application-auto-scaling/src/endpoints.ts +++ b/clients/client-application-auto-scaling/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "application-autoscaling.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "application-autoscaling-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 
@@ const partitionHash: PartitionHash = { hostname: "application-autoscaling.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "application-autoscaling-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-application-discovery-service/src/endpoints.ts b/clients/client-application-discovery-service/src/endpoints.ts index 68a66ed019dd8..a16eafe235309 100644 --- a/clients/client-application-discovery-service/src/endpoints.ts +++ b/clients/client-application-discovery-service/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "discovery.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "discovery-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "discovery.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "discovery-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-application-insights/src/endpoints.ts b/clients/client-application-insights/src/endpoints.ts index 8e292d1b3243e..12337f49796d0 100644 --- a/clients/client-application-insights/src/endpoints.ts +++ b/clients/client-application-insights/src/endpoints.ts @@ -97,6 +97,10 @@ const partitionHash: PartitionHash = { hostname: "applicationinsights.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "applicationinsights-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -107,6 +111,10 @@ const partitionHash: PartitionHash = { hostname: "applicationinsights.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "applicationinsights-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-applicationcostprofiler/src/endpoints.ts b/clients/client-applicationcostprofiler/src/endpoints.ts index 2a565f4bfe3b3..e27aeb4486c77 100644 --- a/clients/client-applicationcostprofiler/src/endpoints.ts +++ b/clients/client-applicationcostprofiler/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "application-cost-profiler.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "application-cost-profiler-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "application-cost-profiler.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "application-cost-profiler-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-apprunner/src/endpoints.ts b/clients/client-apprunner/src/endpoints.ts index 74bd24accde71..d3a8dbc4fae90 100644 --- a/clients/client-apprunner/src/endpoints.ts +++ b/clients/client-apprunner/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "apprunner.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "apprunner-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "apprunner.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "apprunner-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-appstream/src/endpoints.ts b/clients/client-appstream/src/endpoints.ts index 66a35e16414a0..bc91b82eac37f 100644 --- a/clients/client-appstream/src/endpoints.ts +++ b/clients/client-appstream/src/endpoints.ts @@ -118,6 +118,10 @@ const partitionHash: PartitionHash = { hostname: "appstream2.{region}.c2s.ic.gov", tags: [], }, + { + 
hostname: "appstream2-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -128,6 +132,10 @@ const partitionHash: PartitionHash = { hostname: "appstream2.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "appstream2-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-appsync/src/endpoints.ts b/clients/client-appsync/src/endpoints.ts index db2adfa8e1e68..e5b91eadf6ed6 100644 --- a/clients/client-appsync/src/endpoints.ts +++ b/clients/client-appsync/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "appsync.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "appsync-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "appsync.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "appsync-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-athena/src/endpoints.ts b/clients/client-athena/src/endpoints.ts index 49709e3b82246..dd3bbdfd76eaf 100644 --- a/clients/client-athena/src/endpoints.ts +++ b/clients/client-athena/src/endpoints.ts @@ -155,6 +155,10 @@ const partitionHash: PartitionHash = { hostname: "athena.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "athena-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -165,6 +169,10 @@ const partitionHash: PartitionHash = { hostname: "athena.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "athena-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-auditmanager/src/endpoints.ts b/clients/client-auditmanager/src/endpoints.ts index 41a9869905249..559f97315d701 100644 --- a/clients/client-auditmanager/src/endpoints.ts +++ b/clients/client-auditmanager/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "auditmanager.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "auditmanager-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "auditmanager.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "auditmanager-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-auto-scaling-plans/src/endpoints.ts b/clients/client-auto-scaling-plans/src/endpoints.ts index c8b11a3d177dd..0f92883351914 100644 --- a/clients/client-auto-scaling-plans/src/endpoints.ts +++ b/clients/client-auto-scaling-plans/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "autoscaling-plans.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "autoscaling-plans-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "autoscaling-plans.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "autoscaling-plans-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-auto-scaling/src/endpoints.ts b/clients/client-auto-scaling/src/endpoints.ts index 8cfe4234819ac..ae9eb15616637 100644 --- a/clients/client-auto-scaling/src/endpoints.ts +++ b/clients/client-auto-scaling/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "autoscaling.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "autoscaling-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: 
PartitionHash = { hostname: "autoscaling.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "autoscaling-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-backup/src/endpoints.ts b/clients/client-backup/src/endpoints.ts index 9501a09e1bded..5082b4f7d2708 100644 --- a/clients/client-backup/src/endpoints.ts +++ b/clients/client-backup/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "backup.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "backup-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "backup.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "backup-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-backup/src/models/models_0.ts b/clients/client-backup/src/models/models_0.ts index b8329180cde97..fc74ac2ee800f 100644 --- a/clients/client-backup/src/models/models_0.ts +++ b/clients/client-backup/src/models/models_0.ts @@ -661,6 +661,36 @@ export namespace BackupPlanTemplatesListMember { }); } +export interface ConditionParameter { + ConditionKey?: string; + ConditionValue?: string; +} + +export namespace ConditionParameter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ConditionParameter): any => ({ + ...obj, + }); +} + +export interface Conditions { + StringEquals?: ConditionParameter[]; + StringNotEquals?: ConditionParameter[]; + StringLike?: ConditionParameter[]; + StringNotLike?: ConditionParameter[]; +} + +export namespace Conditions { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Conditions): any => ({ + ...obj, + }); +} + export enum ConditionType { STRINGEQUALS = "STRINGEQUALS", } @@ -727,6 +757,9 @@ export interface BackupSelection { * Assigns the backup plan to every resource with at least one matching tag.
*/ ListOfTags?: Condition[]; + + NotResources?: string[]; + Conditions?: Conditions; } export namespace BackupSelection { diff --git a/clients/client-backup/src/protocols/Aws_restJson1.ts b/clients/client-backup/src/protocols/Aws_restJson1.ts index 10cc4534a7812..15123e4e10c8c 100644 --- a/clients/client-backup/src/protocols/Aws_restJson1.ts +++ b/clients/client-backup/src/protocols/Aws_restJson1.ts @@ -194,6 +194,8 @@ import { BackupVaultListMember, CalculatedLifecycle, Condition, + ConditionParameter, + Conditions, ConflictException, ControlInputParameter, ControlScope, @@ -8465,9 +8467,13 @@ const serializeAws_restJson1BackupRulesInput = (input: BackupRuleInput[], contex const serializeAws_restJson1BackupSelection = (input: BackupSelection, context: __SerdeContext): any => { return { + ...(input.Conditions !== undefined && + input.Conditions !== null && { Conditions: serializeAws_restJson1Conditions(input.Conditions, context) }), ...(input.IamRoleArn !== undefined && input.IamRoleArn !== null && { IamRoleArn: input.IamRoleArn }), ...(input.ListOfTags !== undefined && input.ListOfTags !== null && { ListOfTags: serializeAws_restJson1ListOfTags(input.ListOfTags, context) }), + ...(input.NotResources !== undefined && + input.NotResources !== null && { NotResources: serializeAws_restJson1ResourceArns(input.NotResources, context) }), ...(input.Resources !== undefined && input.Resources !== null && { Resources: serializeAws_restJson1ResourceArns(input.Resources, context) }), ...(input.SelectionName !== undefined && input.SelectionName !== null && { SelectionName: input.SelectionName }), @@ -8508,6 +8514,46 @@ const serializeAws_restJson1Condition = (input: Condition, context: __SerdeConte }; }; +const serializeAws_restJson1ConditionParameter = (input: ConditionParameter, context: __SerdeContext): any => { + return { + ...(input.ConditionKey !== undefined && input.ConditionKey !== null && { ConditionKey: input.ConditionKey }), + ...(input.ConditionValue !== undefined && + input.ConditionValue !== null && { ConditionValue: input.ConditionValue }), + }; +}; + +const serializeAws_restJson1ConditionParameters = (input: ConditionParameter[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1ConditionParameter(entry, context); + }); +}; + +const serializeAws_restJson1Conditions = (input: Conditions, context: __SerdeContext): any => { + return { + ...(input.StringEquals !== undefined && + input.StringEquals !== null && { + StringEquals: serializeAws_restJson1ConditionParameters(input.StringEquals, context), + }), + ...(input.StringLike !== undefined && + input.StringLike !== null && { + StringLike: serializeAws_restJson1ConditionParameters(input.StringLike, context), + }), + ...(input.StringNotEquals !== undefined && + input.StringNotEquals !== null && { + StringNotEquals: serializeAws_restJson1ConditionParameters(input.StringNotEquals, context), + }), + ...(input.StringNotLike !== undefined && + input.StringNotLike !== null && { + StringNotLike: serializeAws_restJson1ConditionParameters(input.StringNotLike, context), + }), + }; +}; + const serializeAws_restJson1ControlInputParameter = (input: ControlInputParameter, context: __SerdeContext): any => { return { ...(input.ParameterName !== undefined && input.ParameterName !== null && { ParameterName: input.ParameterName }), @@ -8961,11 +9007,19 @@ const deserializeAws_restJson1BackupRules = (output: any, context: 
__SerdeContex const deserializeAws_restJson1BackupSelection = (output: any, context: __SerdeContext): BackupSelection => { return { + Conditions: + output.Conditions !== undefined && output.Conditions !== null + ? deserializeAws_restJson1Conditions(output.Conditions, context) + : undefined, IamRoleArn: __expectString(output.IamRoleArn), ListOfTags: output.ListOfTags !== undefined && output.ListOfTags !== null ? deserializeAws_restJson1ListOfTags(output.ListOfTags, context) : undefined, + NotResources: + output.NotResources !== undefined && output.NotResources !== null + ? deserializeAws_restJson1ResourceArns(output.NotResources, context) + : undefined, Resources: output.Resources !== undefined && output.Resources !== null ? deserializeAws_restJson1ResourceArns(output.Resources, context) @@ -9083,6 +9137,45 @@ const deserializeAws_restJson1Condition = (output: any, context: __SerdeContext) } as any; }; +const deserializeAws_restJson1ConditionParameter = (output: any, context: __SerdeContext): ConditionParameter => { + return { + ConditionKey: __expectString(output.ConditionKey), + ConditionValue: __expectString(output.ConditionValue), + } as any; +}; + +const deserializeAws_restJson1ConditionParameters = (output: any, context: __SerdeContext): ConditionParameter[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1ConditionParameter(entry, context); + }); +}; + +const deserializeAws_restJson1Conditions = (output: any, context: __SerdeContext): Conditions => { + return { + StringEquals: + output.StringEquals !== undefined && output.StringEquals !== null + ? deserializeAws_restJson1ConditionParameters(output.StringEquals, context) + : undefined, + StringLike: + output.StringLike !== undefined && output.StringLike !== null + ? deserializeAws_restJson1ConditionParameters(output.StringLike, context) + : undefined, + StringNotEquals: + output.StringNotEquals !== undefined && output.StringNotEquals !== null + ? deserializeAws_restJson1ConditionParameters(output.StringNotEquals, context) + : undefined, + StringNotLike: + output.StringNotLike !== undefined && output.StringNotLike !== null + ? deserializeAws_restJson1ConditionParameters(output.StringNotLike, context) + : undefined, + } as any; +}; + const deserializeAws_restJson1ControlInputParameter = (output: any, context: __SerdeContext): ControlInputParameter => { return { ParameterName: __expectString(output.ParameterName), diff --git a/clients/client-batch/README.md b/clients/client-batch/README.md index bc4fec2b6a657..7597affc7804f 100644 --- a/clients/client-batch/README.md +++ b/clients/client-batch/README.md @@ -9,7 +9,7 @@ AWS SDK for JavaScript Batch Client for Node.js, Browser and React Native.Using Batch, you can run batch computing workloads on the Cloud. Batch computing is a common means for +
Using Batch, you can run batch computing workloads on the Amazon Web Services Cloud. Batch computing is a common means for developers, scientists, and engineers to access large amounts of compute resources. Batch uses the advantages of this computing workload to remove the undifferentiated heavy lifting of configuring and managing required infrastructure. At the same time, it also adopts a familiar batch computing software approach. Given these @@ -18,7 +18,7 @@ helping you to eliminate capacity constraints, reduce compute costs, and deliver
As a fully managed service, Batch can run batch computing workloads of any scale. Batch automatically provisions compute resources and optimizes workload distribution based on the quantity and scale of your specific workloads. With Batch, there's no need to install or manage batch computing software. This means that you can focus -your time and energy on analyzing results and solving your specific problems.
+your time and energy on analyzing results and solving your specific problems. ## Installing diff --git a/clients/client-batch/src/Batch.ts b/clients/client-batch/src/Batch.ts index f8993f9f76af0..31e2b43015ad4 100644 --- a/clients/client-batch/src/Batch.ts +++ b/clients/client-batch/src/Batch.ts @@ -12,6 +12,11 @@ import { CreateJobQueueCommandInput, CreateJobQueueCommandOutput, } from "./commands/CreateJobQueueCommand"; +import { + CreateSchedulingPolicyCommand, + CreateSchedulingPolicyCommandInput, + CreateSchedulingPolicyCommandOutput, +} from "./commands/CreateSchedulingPolicyCommand"; import { DeleteComputeEnvironmentCommand, DeleteComputeEnvironmentCommandInput, @@ -22,6 +27,11 @@ import { DeleteJobQueueCommandInput, DeleteJobQueueCommandOutput, } from "./commands/DeleteJobQueueCommand"; +import { + DeleteSchedulingPolicyCommand, + DeleteSchedulingPolicyCommandInput, + DeleteSchedulingPolicyCommandOutput, +} from "./commands/DeleteSchedulingPolicyCommand"; import { DeregisterJobDefinitionCommand, DeregisterJobDefinitionCommandInput, @@ -47,7 +57,17 @@ import { DescribeJobsCommandInput, DescribeJobsCommandOutput, } from "./commands/DescribeJobsCommand"; +import { + DescribeSchedulingPoliciesCommand, + DescribeSchedulingPoliciesCommandInput, + DescribeSchedulingPoliciesCommandOutput, +} from "./commands/DescribeSchedulingPoliciesCommand"; import { ListJobsCommand, ListJobsCommandInput, ListJobsCommandOutput } from "./commands/ListJobsCommand"; +import { + ListSchedulingPoliciesCommand, + ListSchedulingPoliciesCommandInput, + ListSchedulingPoliciesCommandOutput, +} from "./commands/ListSchedulingPoliciesCommand"; import { ListTagsForResourceCommand, ListTagsForResourceCommandInput, @@ -80,10 +100,15 @@ import { UpdateJobQueueCommandInput, UpdateJobQueueCommandOutput, } from "./commands/UpdateJobQueueCommand"; +import { + UpdateSchedulingPolicyCommand, + UpdateSchedulingPolicyCommandInput, + UpdateSchedulingPolicyCommandOutput, +} from "./commands/UpdateSchedulingPolicyCommand"; /** *Using Batch, you can run batch computing workloads on the Cloud. Batch computing is a common means for + *
Using Batch, you can run batch computing workloads on the Amazon Web Services Cloud. Batch computing is a common means for * developers, scientists, and engineers to access large amounts of compute resources. Batch uses the advantages of * this computing workload to remove the undifferentiated heavy lifting of configuring and managing required * infrastructure. At the same time, it also adopts a familiar batch computing software approach. Given these @@ -92,7 +117,7 @@ import { *
As a fully managed service, Batch can run batch computing workloads of any scale. Batch automatically * provisions compute resources and optimizes workload distribution based on the quantity and scale of your specific * workloads. With Batch, there's no need to install or manage batch computing software. This means that you can focus - * your time and energy on analyzing results and solving your specific problems.
+ * your time and energy on analyzing results and solving your specific problems. */ export class Batch extends BatchClient { /** @@ -132,18 +157,18 @@ export class Batch extends BatchClient { * within the environment. This is based on the compute resource specification that you define or the launch template that you * specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot * Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can - * optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a - * specified percentage of the On-Demand price. + * optionally set a maximum price so that Spot Instances only launch + * when + * the Spot Instance price is less than a specified percentage of the On-Demand price. *Multi-node parallel jobs aren't supported on Spot Instances.
*In an unmanaged compute environment, you can manage your own EC2 compute resources and have a lot of flexibility * with how you configure your compute resources. For example, you can use custom AMIs. However, you must verify that - * each of your AMIs meet the Amazon ECS container instance AMI specification. For more information, see container instance - * AMIs in the Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment, you - * can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with - * it. Then, launch your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS - * container instance in the Amazon Elastic Container Service Developer Guide.
+ * each of your AMIs meet the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the + * Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch + * your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the + * Amazon Elastic Container Service Developer Guide. *Batch doesn't upgrade the AMIs in a compute environment after the environment is created. For example, it * doesn't update the AMIs when a newer version of the Amazon ECS optimized AMI is available. Therefore, you're responsible @@ -232,6 +257,39 @@ export class Batch extends BatchClient { } } + /** + *
Creates a Batch scheduling + * policy.
+ */ + public createSchedulingPolicy( + args: CreateSchedulingPolicyCommandInput, + options?: __HttpHandlerOptions + ): PromiseDeletes an Batch compute environment.
*Before you can delete a compute environment, you must set its state to DISABLED
with the UpdateComputeEnvironment API operation and disassociate it from any job queues with the UpdateJobQueue API operation. Compute environments that use Fargate resources must terminate all
@@ -302,6 +360,40 @@ export class Batch extends BatchClient {
}
}
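Taken together, the scheduling-policy methods this PR adds to the aggregated `Batch` client round-trip as follows. This is an editor's sketch, not part of the diff: the `arns` field on the describe request is not shown in this excerpt and is an assumption, the non-null assertions reflect the codegen's `string | undefined` typing, and the region is a placeholder.

```ts
import { Batch } from "@aws-sdk/client-batch";

const batch = new Batch({ region: "us-east-1" });

async function schedulingPolicyRoundTrip(): Promise<void> {
  // Create a policy; the response carries `name` and `arn` per
  // CreateSchedulingPolicyResponse elsewhere in this PR.
  const created = await batch.createSchedulingPolicy({
    name: "MySchedulingPolicy",
    fairsharePolicy: { shareDecaySeconds: 3600 },
  });

  // Look it up, then delete it. Deletion fails while any job queue
  // still references the policy (see the note below).
  await batch.describeSchedulingPolicies({ arns: [created.arn!] }); // `arns` is an assumption
  await batch.deleteSchedulingPolicy({ arn: created.arn! });
}
```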
+ /**
+ *
Deletes the specified scheduling + * policy.
+ *You can't delete a scheduling policy that is used in any job queues.
+ */ + public deleteSchedulingPolicy( + args: DeleteSchedulingPolicyCommandInput, + options?: __HttpHandlerOptions + ): PromiseDeregisters an Batch job definition. Job definitions are permanently deleted after 180 days.
*/ @@ -463,6 +555,39 @@ export class Batch extends BatchClient { } } + /** + *Describes one or more of your scheduling + * policies.
+ */ + public describeSchedulingPolicies( + args: DescribeSchedulingPoliciesCommandInput, + options?: __HttpHandlerOptions + ): PromiseReturns a list of Batch jobs.
*You must specify only one of the following items:
@@ -504,8 +629,41 @@ export class Batch extends BatchClient { } /** - *Lists the tags for an Batch resource. Batch resources that support tags are compute environments, jobs, job definitions, and job - * queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.
+ *Returns a list of Batch scheduling + * policies.
+ */ + public listSchedulingPolicies( + args: ListSchedulingPoliciesCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists the tags for an Batch resource. Batch resources that support tags are compute environments, jobs, job definitions, job queues, + * and scheduling policies. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.
*/ public listTagsForResource( args: ListTagsForResourceCommandInput, @@ -571,10 +729,15 @@ export class Batch extends BatchClient { /** *Submits an Batch job from a job definition. Parameters that are specified during SubmitJob
* override parameters defined in the job definition. vCPU and memory requirements that are specified in the
- * ResourceRequirements
objects in the job definition are the exception. They can't be overridden this way
+ * resourceRequirements
objects in the job definition are the exception. They can't be overridden this way
* using the memory
and vcpus
parameters. Rather, you must specify updates to job definition
* parameters in a ResourceRequirements
object that's included in the containerOverrides
* parameter.
Job queues with a scheduling policy are limited to 500 active fair share identifiers at a time.
+Jobs that run on Fargate resources can't be guaranteed to run for more than 14 days. This is because, after 14 * days, Fargate resources might become unavailable and jobs might be terminated.
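Concretely, a minimal sketch of the override path the doc comment above describes — an editor's addition, not part of the diff; the queue and definition names and values are hypothetical:

```ts
// Overriding vCPU and memory at submit time through `resourceRequirements`
// inside `containerOverrides`, rather than the deprecated top-level
// `memory`/`vcpus` parameters.
import { BatchClient, SubmitJobCommand } from "@aws-sdk/client-batch";

const client = new BatchClient({ region: "us-east-1" });

async function submitWithOverrides(): Promise<void> {
  const response = await client.send(
    new SubmitJobCommand({
      jobName: "example-job",
      jobQueue: "my-job-queue",
      jobDefinition: "my-job-definition:1",
      containerOverrides: {
        resourceRequirements: [
          { type: "VCPU", value: "2" },
          { type: "MEMORY", value: "4096" }, // MiB, passed as a string
        ],
      },
    })
  );
  console.log(response.jobId);
}
```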
@@ -606,8 +769,8 @@ export class Batch extends BatchClient { /** *Associates the specified tags to a resource with the specified resourceArn
. If existing tags on a
* resource aren't specified in the request parameters, they aren't changed. When a resource is deleted, the tags that
- * are associated with that resource are deleted as well. Batch resources that support tags are compute environments, jobs, job definitions, and job
- * queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.
Updates a scheduling + * policy.
+ */ + public updateSchedulingPolicy( + args: UpdateSchedulingPolicyCommandInput, + options?: __HttpHandlerOptions + ): PromiseUsing Batch, you can run batch computing workloads on the Cloud. Batch computing is a common means for + *
Using Batch, you can run batch computing workloads on the Amazon Web Services Cloud. Batch computing is a common means for * developers, scientists, and engineers to access large amounts of compute resources. Batch uses the advantages of * this computing workload to remove the undifferentiated heavy lifting of configuring and managing required * infrastructure. At the same time, it also adopts a familiar batch computing software approach. Given these @@ -295,7 +325,7 @@ export interface BatchClientResolvedConfig extends BatchClientResolvedConfigType *
As a fully managed service, Batch can run batch computing workloads of any scale. Batch automatically * provisions compute resources and optimizes workload distribution based on the quantity and scale of your specific * workloads. With Batch, there's no need to install or manage batch computing software. This means that you can focus - * your time and energy on analyzing results and solving your specific problems.
+ * your time and energy on analyzing results and solving your specific problems. */ export class BatchClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-batch/src/commands/CreateComputeEnvironmentCommand.ts b/clients/client-batch/src/commands/CreateComputeEnvironmentCommand.ts index 398d677b6b0b3..32957a76fd630 100644 --- a/clients/client-batch/src/commands/CreateComputeEnvironmentCommand.ts +++ b/clients/client-batch/src/commands/CreateComputeEnvironmentCommand.ts @@ -29,18 +29,18 @@ export interface CreateComputeEnvironmentCommandOutput extends CreateComputeEnvi * within the environment. This is based on the compute resource specification that you define or the launch template that you * specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot * Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can - * optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a - * specified percentage of the On-Demand price. + * optionally set a maximum price so that Spot Instances only launch + * when + * the Spot Instance price is less than a specified percentage of the On-Demand price. *Multi-node parallel jobs aren't supported on Spot Instances.
*In an unmanaged compute environment, you can manage your own EC2 compute resources and have a lot of flexibility * with how you configure your compute resources. For example, you can use custom AMIs. However, you must verify that - * each of your AMIs meet the Amazon ECS container instance AMI specification. For more information, see container instance - * AMIs in the Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment, you - * can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with - * it. Then, launch your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS - * container instance in the Amazon Elastic Container Service Developer Guide.
+ * each of your AMIs meet the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the + * Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch + * your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the + * Amazon Elastic Container Service Developer Guide. *Batch doesn't upgrade the AMIs in a compute environment after the environment is created. For example, it * doesn't update the AMIs when a newer version of the Amazon ECS optimized AMI is available. Therefore, you're responsible diff --git a/clients/client-batch/src/commands/CreateSchedulingPolicyCommand.ts b/clients/client-batch/src/commands/CreateSchedulingPolicyCommand.ts new file mode 100644 index 0000000000000..30ad6f1502333 --- /dev/null +++ b/clients/client-batch/src/commands/CreateSchedulingPolicyCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { BatchClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../BatchClient"; +import { CreateSchedulingPolicyRequest, CreateSchedulingPolicyResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateSchedulingPolicyCommand, + serializeAws_restJson1CreateSchedulingPolicyCommand, +} from "../protocols/Aws_restJson1"; + +export interface CreateSchedulingPolicyCommandInput extends CreateSchedulingPolicyRequest {} +export interface CreateSchedulingPolicyCommandOutput extends CreateSchedulingPolicyResponse, __MetadataBearer {} + +/** + *
Creates a Batch scheduling + * policy.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BatchClient, CreateSchedulingPolicyCommand } from "@aws-sdk/client-batch"; // ES Modules import + * // const { BatchClient, CreateSchedulingPolicyCommand } = require("@aws-sdk/client-batch"); // CommonJS import + * const client = new BatchClient(config); + * const command = new CreateSchedulingPolicyCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateSchedulingPolicyCommandInput} for command's `input` shape. + * @see {@link CreateSchedulingPolicyCommandOutput} for command's `response` shape. + * @see {@link BatchClientResolvedConfig | config} for BatchClient's `config` shape. + * + */ +export class CreateSchedulingPolicyCommand extends $Command< + CreateSchedulingPolicyCommandInput, + CreateSchedulingPolicyCommandOutput, + BatchClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateSchedulingPolicyCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackDeletes the specified scheduling + * policy.
+ *You can't delete a scheduling policy that is used in any job queues.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BatchClient, DeleteSchedulingPolicyCommand } from "@aws-sdk/client-batch"; // ES Modules import + * // const { BatchClient, DeleteSchedulingPolicyCommand } = require("@aws-sdk/client-batch"); // CommonJS import + * const client = new BatchClient(config); + * const command = new DeleteSchedulingPolicyCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteSchedulingPolicyCommandInput} for command's `input` shape. + * @see {@link DeleteSchedulingPolicyCommandOutput} for command's `response` shape. + * @see {@link BatchClientResolvedConfig | config} for BatchClient's `config` shape. + * + */ +export class DeleteSchedulingPolicyCommand extends $Command< + DeleteSchedulingPolicyCommandInput, + DeleteSchedulingPolicyCommandOutput, + BatchClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteSchedulingPolicyCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackDescribes one or more of your scheduling + * policies.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BatchClient, DescribeSchedulingPoliciesCommand } from "@aws-sdk/client-batch"; // ES Modules import + * // const { BatchClient, DescribeSchedulingPoliciesCommand } = require("@aws-sdk/client-batch"); // CommonJS import + * const client = new BatchClient(config); + * const command = new DescribeSchedulingPoliciesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeSchedulingPoliciesCommandInput} for command's `input` shape. + * @see {@link DescribeSchedulingPoliciesCommandOutput} for command's `response` shape. + * @see {@link BatchClientResolvedConfig | config} for BatchClient's `config` shape. + * + */ +export class DescribeSchedulingPoliciesCommand extends $Command< + DescribeSchedulingPoliciesCommandInput, + DescribeSchedulingPoliciesCommandOutput, + BatchClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeSchedulingPoliciesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackReturns a list of Batch scheduling + * policies.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BatchClient, ListSchedulingPoliciesCommand } from "@aws-sdk/client-batch"; // ES Modules import + * // const { BatchClient, ListSchedulingPoliciesCommand } = require("@aws-sdk/client-batch"); // CommonJS import + * const client = new BatchClient(config); + * const command = new ListSchedulingPoliciesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListSchedulingPoliciesCommandInput} for command's `input` shape. + * @see {@link ListSchedulingPoliciesCommandOutput} for command's `response` shape. + * @see {@link BatchClientResolvedConfig | config} for BatchClient's `config` shape. + * + */ +export class ListSchedulingPoliciesCommand extends $Command< + ListSchedulingPoliciesCommandInput, + ListSchedulingPoliciesCommandOutput, + BatchClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListSchedulingPoliciesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackLists the tags for an Batch resource. Batch resources that support tags are compute environments, jobs, job definitions, and job - * queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.
+ *Lists the tags for an Batch resource. Batch resources that support tags are compute environments, jobs, job definitions, job queues, + * and scheduling policies. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-batch/src/commands/SubmitJobCommand.ts b/clients/client-batch/src/commands/SubmitJobCommand.ts index ae2149e5b81c0..e7c4d636cf76e 100644 --- a/clients/client-batch/src/commands/SubmitJobCommand.ts +++ b/clients/client-batch/src/commands/SubmitJobCommand.ts @@ -24,10 +24,15 @@ export interface SubmitJobCommandOutput extends SubmitJobResponse, __MetadataBea /** *Submits an Batch job from a job definition. Parameters that are specified during SubmitJob
* override parameters defined in the job definition. vCPU and memory requirements that are specified in the
- * ResourceRequirements
objects in the job definition are the exception. They can't be overridden this way
+ * resourceRequirements
objects in the job definition are the exception. They can't be overridden this way
* using the memory
and vcpus
parameters. Rather, you must specify updates to job definition
* parameters in a ResourceRequirements
object that's included in the containerOverrides
* parameter.
Job queues with a scheduling policy are limited to 500 active fair share identifiers at a time.
+ *Jobs that run on Fargate resources can't be guaranteed to run for more than 14 days. This is because, after 14 * days, Fargate resources might become unavailable and job might be terminated.
diff --git a/clients/client-batch/src/commands/TagResourceCommand.ts b/clients/client-batch/src/commands/TagResourceCommand.ts index 9ae0d956aa7d9..93c9f6f69be0f 100644 --- a/clients/client-batch/src/commands/TagResourceCommand.ts +++ b/clients/client-batch/src/commands/TagResourceCommand.ts @@ -24,8 +24,8 @@ export interface TagResourceCommandOutput extends TagResourceResponse, __Metadat /** *Associates the specified tags to a resource with the specified resourceArn
. If existing tags on a
* resource aren't specified in the request parameters, they aren't changed. When a resource is deleted, the tags that
- * are associated with that resource are deleted as well. Batch resources that support tags are compute environments, jobs, job definitions, and job
- * queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.
Updates a scheduling + * policy.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BatchClient, UpdateSchedulingPolicyCommand } from "@aws-sdk/client-batch"; // ES Modules import + * // const { BatchClient, UpdateSchedulingPolicyCommand } = require("@aws-sdk/client-batch"); // CommonJS import + * const client = new BatchClient(config); + * const command = new UpdateSchedulingPolicyCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateSchedulingPolicyCommandInput} for command's `input` shape. + * @see {@link UpdateSchedulingPolicyCommandOutput} for command's `response` shape. + * @see {@link BatchClientResolvedConfig | config} for BatchClient's `config` shape. + * + */ +export class UpdateSchedulingPolicyCommand extends $Command< + UpdateSchedulingPolicyCommandInput, + UpdateSchedulingPolicyCommandOutput, + BatchClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateSchedulingPolicyCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackProvides information used to select Amazon Machine Images (AMIs) for instances in the compute environment. If
- * Ec2Configuration
isn't specified, the default is currently ECS_AL1
(Amazon Linux) for
- * non-GPU, non AWSGraviton instances. Starting on March 31, 2021, this default will be changing to ECS_AL2
- * (Amazon Linux
- * 2).
Ec2Configuration
isn't specified, the default is ECS_AL2
(Amazon Linux 2).
* This object isn't applicable to jobs that are running on Fargate resources.
*The image type to match with the instance type to select an AMI. If the imageIdOverride
parameter
- * isn't specified, then a recent Amazon ECS-optimized AMI (ECS_AL1
) is
- * used. Starting on March 31, 2021, this default will be changing to ECS_AL2
(Amazon Linux 2).
ECS_AL2
) is used.
*
* Amazon Linux
- * 2− Default for all Amazon Web Services Graviton-based instance families (for example, C6g
,
- * M6g
, R6g
, and T4g
) and can be used for all non-GPU instance types.
- * Amazon - * Linux−Default for all non-GPU, non Amazon Web Services Graviton instance families. Amazon Linux is reaching the - * end-of-life of standard support. For more information, see Amazon - * Linux AMI.
+ * Amazon Linux. + * Amazon Linux is reaching the end-of-life of standard support. For more information, see Amazon Linux AMI. *Key-value pair tags to be applied to EC2 resources that are launched in the compute environment. For Batch,
* these take the form of "String1": "String2", where String1 is the tag key and String2 is the tag value−for
- * example, { "Name": "Batch Instance - C4OnDemand" }
. This is helpful for recognizing your Batch instances in the
- * Amazon EC2 console. These tags can't be updated or removed after the compute environment is created.Aany changes to these
- * tags require that you create a new compute environment and remove the old compute environment. These tags aren't seen
- * when using the Batch ListTagsForResource
API operation.
{ "Name": "Batch Instance - C4OnDemand" }
. This is helpful for recognizing your Batch
+ * instances in the Amazon EC2 console. These tags can't be updated or removed after the compute environment is created. Any
+ * changes to these tags require that you create a new compute environment and remove the old compute environment. These
+ * tags aren't seen when using the Batch ListTagsForResource
API operation.
* This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be * specified.
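A small sketch of the tag map described above — an editor's addition, not from the diff; it assumes the `ComputeResource` model type is re-exported by the package, as these generated clients normally do, and the tag values are illustrative only:

```ts
import type { ComputeResource } from "@aws-sdk/client-batch";

// Plain string-to-string map; keys are tag keys, values are tag values.
// These land on the launched EC2 instances and, per the doc comment above,
// are not visible through the Batch ListTagsForResource operation.
const instanceTags: ComputeResource["tags"] = {
  Name: "Batch Instance - C4OnDemand",
  Team: "data-platform", // illustrative value
};
```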
@@ -631,7 +625,10 @@ export interface ComputeResource { /** *Provides information used to select Amazon Machine Images (AMIs) for EC2 instances in the compute environment.
- * If Ec2Configuration
isn't specified, the default is ECS_AL1
.
Ec2Configuration
isn't specified, the default is ECS_AL2
.
+ *
+ * One or two values can be provided.
+ * *This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be * specified.
@@ -689,6 +686,18 @@ export interface CreateComputeEnvironmentRequest { */ state?: CEState | string; + /** + *The maximum number of vCPUs for an + * unmanaged compute environment. This parameter is only used for fair share scheduling to reserve vCPU capacity for new + * share identifiers. If this parameter is not provided for a fair share job queue, no vCPU capacity will be + * reserved.
+ * + *This parameter is only supported when the type
parameter is set to UNMANAGED
/
Details about the compute resources managed by the compute environment. This parameter is required for managed * compute environments. For more information, see Compute Environments in the Batch User Guide.
@@ -721,7 +730,7 @@ export interface CreateComputeEnvironmentRequest { /** *The tags that you apply to the compute environment to help you categorize and organize your resources. Each tag * consists of a key and an optional value. For more information, see Tagging Amazon Web Services Resources in Amazon Web Services General - * Reference.
+ * Reference. *These tags can be updated or removed using the TagResource and UntagResource API operations. These tags don't * propagate to the underlying compute resources.
*/ @@ -816,6 +825,18 @@ export interface CreateJobQueueRequest { */ state?: JQState | string; + /** + *Amazon Resource Name (ARN) of the fair share scheduling
+ * policy. If this parameter is specified, the job queue will use a fair share scheduling policy. If this parameter is
+ * not specified, the job queue will use a first in, first out (FIFO) scheduling policy. Once a job queue is created,
+ * the fair share scheduling policy can be replaced but not removed. The format is
+ * arn:Partition:batch:Region:Account:scheduling-policy/Name
+ *
.
+ * For example,
+ * arn:aws:batch:us-west-2:012345678910:scheduling-policy/MySchedulingPolicy
.
The priority of the job queue. Job queues with a higher priority (or a higher integer value for the
* priority
parameter) are evaluated first when associated with the same compute environment. Priority is
@@ -877,6 +898,151 @@ export namespace CreateJobQueueResponse {
});
}
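To tie the new `schedulingPolicyArn` member documented above to the scheduling-policy types defined next, a combined sketch — an editor's addition, not part of the diff; the compute environment ARN and all names are hypothetical placeholders:

```ts
import {
  BatchClient,
  CreateJobQueueCommand,
  CreateSchedulingPolicyCommand,
} from "@aws-sdk/client-batch";

const client = new BatchClient({ region: "us-west-2" });

async function createFairShareQueue(): Promise<void> {
  const policy = await client.send(
    new CreateSchedulingPolicyCommand({
      name: "MySchedulingPolicy",
      fairsharePolicy: {
        shareDecaySeconds: 3600,
        computeReservation: 50,
        // A weightFactor of 0.5 gives matching share identifiers twice the
        // compute resources of identifiers at the default weight of 1.0.
        shareDistribution: [{ shareIdentifier: "UserA*", weightFactor: 0.5 }],
      },
    })
  );

  await client.send(
    new CreateJobQueueCommand({
      jobQueueName: "my-fair-share-queue",
      state: "ENABLED",
      priority: 1,
      // Once set, this can later be replaced but never removed.
      schedulingPolicyArn: policy.arn,
      computeEnvironmentOrder: [
        {
          order: 1,
          computeEnvironment:
            "arn:aws:batch:us-west-2:012345678910:compute-environment/my-ce",
        },
      ],
    })
  );
}
```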
+/**
+ *
Specifies the weights for the fair share identifiers for the fair share policy. Fair share identifiers that are
+ * not included have a default weight of 1.0
.
A fair share identifier or fair share identifier prefix. If the string ends with '*' then this entry specifies
+ * the weight factor to use for fair share identifiers that begin with that prefix. The list of fair share identifiers
+ * in a fair share policy cannot overlap. For example, you cannot have one that specifies a shareIdentifier
+ * of UserA*
and another that specifies a shareIdentifier
of UserA-1
.
There can be no more than 500 fair share identifiers active in a job queue.
+ *The string is limited to 255 alphanumeric characters, optionally followed by '*'.
+ */ + shareIdentifier: string | undefined; + + /** + *The weight factor for the fair share + * identifier. The default value is 1.0. A lower value has a higher priority for compute resources. For example, jobs + * using a share identifier with a weight factor of 0.125 (1/8) will get 8 times the compute resources of jobs using a + * share identifier with a weight factor of 1.
+ *The smallest supported value is 0.0001 and the largest supported value is 999.9999.
+ */ + weightFactor?: number; +} + +export namespace ShareAttributes { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ShareAttributes): any => ({ + ...obj, + }); +} + +/** + *The fair share policy for a scheduling + * policy.
+ */
+export interface FairsharePolicy {
+  /**
+   * The time period to use to calculate a fair share percentage for each fair share identifier in use, in seconds.
+   * A value of zero (0) indicates that only current usage should be measured; if there are four evenly weighted fair
+   * share identifiers, then each can only use up to 25% of the available CPU resources, even if some of the fair share
+   * identifiers have no currently running jobs. The decay allows for more recently run jobs to have more weight than
+   * jobs that ran earlier. The maximum supported value is 604800 (1 week).
+   */
+  shareDecaySeconds?: number;
+
+  /**
+   * A value used to reserve some of the available maximum vCPU for fair share identifiers that have not yet been
+   * used.
+   *
+   * The reserved ratio is (computeReservation/100)^ActiveFairShares, where ActiveFairShares is the number of
+   * active fair share identifiers.
+   *
+   * For example, a computeReservation value of 50 indicates that Batch should reserve 50% of the
+   * maximum available vCPU if there is only one fair share identifier, 25% if there are two fair share identifiers, and
+   * 12.5% if there are three fair share identifiers. A computeReservation value of 25 indicates that Batch
+   * should reserve 25% of the maximum available vCPU if there is only one fair share identifier, 6.25% if there are two
+   * fair share identifiers, and 1.56% if there are three fair share identifiers.
+   *
+   * The minimum value is 0 and the maximum value is 99.
+   */
+  computeReservation?: number;
+
+  /**
+   * An array of ShareAttributes objects that contain the weights for the fair share identifiers for the fair share
+   * policy. Fair share identifiers that aren't included have a default weight of 1.0.
+   */
+  shareDistribution?: ShareAttributes[];
+}
+
+export namespace FairsharePolicy {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: FairsharePolicy): any => ({
+    ...obj,
+  });
+}
+
+export interface CreateSchedulingPolicyRequest {
+  /**
+   * The name of the scheduling policy. Up to 128 letters (uppercase and lowercase), numbers, hyphens, and
+   * underscores are allowed.
+   */
+  name: string | undefined;
+
+  /**
+   * The fair share policy of the scheduling policy.
+   */
+  fairsharePolicy?: FairsharePolicy;
+
+  /**
+   * The tags that you apply to the scheduling policy to help you categorize and organize your resources. Each tag
+   * consists of a key and an optional value. For more information, see Tagging Amazon Web Services Resources in Amazon Web Services General
+   * Reference.
+   *
+   * These tags can be updated or removed using the TagResource and UntagResource API operations.
+   */
+  tags?: { [key: string]: string };
+}
+
+export namespace CreateSchedulingPolicyRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: CreateSchedulingPolicyRequest): any => ({
+    ...obj,
+  });
+}
+
+export interface CreateSchedulingPolicyResponse {
+  /**
+   * The name of the scheduling policy.
+   */
+  name: string | undefined;
+
+  /**
+   * The Amazon Resource Name (ARN) of the scheduling policy. The format is
+   * arn:Partition:batch:Region:Account:scheduling-policy/Name.
+   * For example, arn:aws:batch:us-west-2:012345678910:scheduling-policy/MySchedulingPolicy.
+   */
+  arn: string | undefined;
+}
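// Editor's illustrative sketch, not part of this diff: creating the scheduling policy
// described above. With computeReservation = 50, the reserved share for N active fair
// share identifiers is (50 / 100) ** N: 50% for one, 25% for two, 12.5% for three.
import { BatchClient, CreateSchedulingPolicyCommand } from "@aws-sdk/client-batch";

async function createSchedulingPolicy(batch: BatchClient): Promise<string | undefined> {
  const { arn } = await batch.send(
    new CreateSchedulingPolicyCommand({
      name: "MySchedulingPolicy",
      fairsharePolicy: {
        shareDecaySeconds: 3600, // weight usage over the past hour
        computeReservation: 50, // reserve (50/100)^N of the maximum vCPU
        shareDistribution: [
          // A weight factor of 0.125 gets 8x the resources of a weight factor of 1.0.
          { shareIdentifier: "UserA*", weightFactor: 0.125 },
          { shareIdentifier: "UserB", weightFactor: 1.0 },
        ],
      },
    })
  );
  return arn;
}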
/**
 * Contains the parameters for DeleteComputeEnvironment.
 */
+export interface DeleteSchedulingPolicyRequest {
+  /**
+   * The Amazon Resource Name (ARN) of the scheduling policy to delete.
+   */
+  arn: string | undefined;
+}
+
+export namespace DeleteSchedulingPolicyRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: DeleteSchedulingPolicyRequest): any => ({
+    ...obj,
+  });
+}
+
+export interface DeleteSchedulingPolicyResponse {}
+
+export namespace DeleteSchedulingPolicyResponse {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: DeleteSchedulingPolicyResponse): any => ({
+    ...obj,
+  });
+}
+
 export interface DeregisterJobDefinitionRequest {
   /**
    * The name and revision (name:revision) or full Amazon Resource Name (ARN) of the job definition to deregister.
+  /**
+   * The maximum number of vCPUs expected to be used for an unmanaged compute environment.
+   */
+  unmanagedvCpus?: number;
+
   /**
    * The Amazon Resource Name (ARN) of the underlying Amazon ECS cluster used by the compute environment.
    */
@@ -1101,8 +1300,8 @@ export interface DescribeComputeEnvironmentsResponse {
   /**
    * The nextToken value to include in a future DescribeComputeEnvironments request. When
-   * the results of a DescribeJobDefinitions request exceed maxResults, this value can be used
-   * to retrieve the next page of results. This value is null when there are no more results to
+   * the results of a DescribeComputeEnvironments request exceed maxResults, this value can be
+   * used to retrieve the next page of results. This value is null when there are no more results to
    * return.
   /**
    * For jobs that are running on Fargate resources, then value must match one of the supported
-   * values and the MEMORY values must be one of the values supported for that VCPU value. The supported
-   * values are 0.25, 0.5, 1, 2, and 4
+   * values and the MEMORY values must be one of the values supported for that VCPU
+   * value. The supported values are 0.25, 0.5, 1, 2, and 4
   /**
    * Whether or not to use the Batch job IAM role defined in a job definition when mounting the Amazon EFS file system.
-   * If enabled, transit encryption must be enabled in the EFSVolumeConfiguration. If this
-   * parameter is omitted, the default value of DISABLED is used. For more information, see Using Amazon EFS Access Points in
-   * the Batch User Guide. EFS IAM authorization requires that TransitEncryption be
+   * If enabled, transit encryption must be enabled in the EFSVolumeConfiguration. If this parameter is
+   * omitted, the default value of DISABLED is used. For more information, see Using Amazon EFS Access Points in the
+   * Batch User Guide. EFS IAM authorization requires that TransitEncryption be
    * ENABLED and that a JobRoleArn is specified.
-   * The number of vCPUs reserved for the job. Each vCPU is equivalent to 1,024 CPU shares. This parameter maps to
-   * CpuShares in the Create a container section of the Docker Remote API and the
-   * --cpu-shares option to docker run. The number of vCPUs must
-   * be specified but can be specified in several places. You must specify it at least once for each node.
-   * This parameter is supported on EC2 resources but isn't supported for jobs that run on Fargate resources. For
-   * these resources, use resourceRequirement instead. You can use this parameter or
-   * the resourceRequirements structure but not both.
-   * This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided. For
-   * jobs that run on Fargate resources, you must specify the vCPU requirement for the job using
-   * resourceRequirements.
+   * This parameter is deprecated, use resourceRequirements to specify the vCPU requirements for the job
+   * definition. It's not supported for jobs that run on Fargate resources. For jobs run on EC2 resources, it specifies
+   * the number of vCPUs reserved for the job.
+   *
+   * Each vCPU is equivalent to 1,024 CPU shares. This parameter maps to CpuShares in the
+   * Create a container section of the Docker Remote API and the --cpu-shares option to
+   * docker run. The number of vCPUs must be specified but can be specified
+   * in several places. You must specify it at least once for each node.
- *This parameter maps to Memory
in the Create a container section of the
- * Docker Remote API and the --memory
option to docker
- * run.
This parameter is supported on EC2 resources but isn't supported on Fargate resources. For Fargate
- * resources, you should specify the memory requirement using resourceRequirement
. You can also do this for
- * EC2 resources.
If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a - * particular instance type, see Memory - * Management in the Batch User Guide.
- *This parameter is deprecated, use
+ * resourceRequirements
to specify the memory requirements for the job definition. It's not supported for
+ * jobs that run on Fargate resources. For jobs run on EC2 resources, it specifies the memory hard
+ * limit (in MiB) for a container. If your container attempts to exceed the specified number, it's terminated. You must
+ * specify at least 4 MiB of memory for a job using this parameter. The memory hard limit can be specified in several
+ * places. It must be specified for each node at least once.
Contains a glob pattern to match against the StatusReason
returned for a job. The pattern can be up
* to 512 characters in length. It can contain letters, numbers, periods (.), colons (:), and white space (including
- * spaces or tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact
- * match.
The string can be between 1 and 512 characters in length.
*/ onStatusReason?: string; @@ -2264,6 +2455,8 @@ export interface EvaluateOnExit { * 512 characters in length. It can contain letters, numbers, periods (.), colons (:), and white space (including spaces * and tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact * match. + * + *The string can be between 1 and 512 characters in length.
*/ onReason?: string; @@ -2271,6 +2464,8 @@ export interface EvaluateOnExit { *Contains a glob pattern to match against the decimal representation of the ExitCode
returned for a
* job. The pattern can be up to 512 characters in length. It can contain only numbers, and can optionally end with an
* asterisk (*) so that only the start of the string needs to be an exact match.
The string can be between 1 and 512 characters in length.
*/ onExitCode?: string; @@ -2362,12 +2557,20 @@ export interface JobDefinition { status?: string; /** - *The type of job definition. If the job is run on Fargate resources, then multinode
isn't
- * supported. For more information about multi-node parallel jobs, see Creating a multi-node parallel job definition in the
- * Batch User Guide.
The type of job definition, either
+ * container
or multinode
. If the job is run on Fargate resources, then
+ * multinode
isn't supported. For more information about multi-node parallel jobs, see Creating a multi-node parallel job definition
+ * in the Batch User Guide.
The scheduling priority of the job + * definition. This will only affect jobs in job queues with a fair share policy. Jobs with a higher scheduling priority + * will be scheduled before jobs with a lower scheduling priority.
+ */ + schedulingPriority?: number; + /** *Default parameters or parameter substitution placeholders that are set in the job definition. Parameters are
* specified as a key-value pair mapping. Parameters in a SubmitJob
request override any corresponding
@@ -2524,6 +2727,15 @@ export interface JobQueueDetail {
*/
state: JQState | string | undefined;
+  /**
+   * The Amazon Resource Name (ARN) of the scheduling policy. The format is
+   * arn:Partition:batch:Region:Account:scheduling-policy/Name.
+   * For example, arn:aws:batch:us-west-2:012345678910:scheduling-policy/MySchedulingPolicy.
+   */
+  schedulingPolicyArn?: string;
+
  /**
   * The status of the job queue (for example, CREATING or VALID).
* requirement for the job using resourceRequirements
, but you can't specify the vCPU requirements in both
- * the vcpus
and resourceRequirement
object. This parameter maps to CpuShares
in
+ * the vcpus
and resourceRequirements
object. This parameter maps to CpuShares
in
* the Create a container section of the Docker Remote API and the --cpu-shares
option to
* docker run. Each vCPU is equivalent to 1,024 CPU shares. You must
* specify at least one vCPU. This is required but can be specified in several places. It must be specified for each
@@ -2633,7 +2845,7 @@ export interface ContainerDetail {
vcpus?: number;
/**
- *
For jobs run on EC2 resources that didn't specify memory requirements using ResourceRequirement
,
+ *
For jobs run on EC2 resources that didn't specify memory requirements using resourceRequirements
,
* the number of MiB of memory reserved for the job. For other jobs, including all run on Fargate resources, see
* resourceRequirements
.
The Amazon Resource Name (ARN) of the execution role that Batch can assume. For more information, see Batch execution IAM role in the + *
The Amazon Resource Name (ARN) of the + * execution + * role that Batch can assume. For more information, see Batch execution IAM role in the * Batch User Guide.
*/ executionRoleArn?: string; @@ -2908,6 +3122,17 @@ export interface JobDetail { */ status: JobStatus | string | undefined; + /** + *The share identifier for the job.
+ */ + shareIdentifier?: string; + + /** + *The scheduling policy of the job definition. This will only affect jobs in job queues with a fair share policy. + * Jobs with a higher scheduling priority will be scheduled before jobs with a lower scheduling priority.
+ */ + schedulingPriority?: number; + /** *A list of job attempts associated with this job.
   */
@@ -3033,6 +3258,82 @@ export namespace DescribeJobsResponse {
  });
}

+export interface DescribeSchedulingPoliciesRequest {
+  /**
+   * A list of up to 100 scheduling policy Amazon Resource Name (ARN) entries.
+   */
+  arns: string[] | undefined;
+}
+
+export namespace DescribeSchedulingPoliciesRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: DescribeSchedulingPoliciesRequest): any => ({
+    ...obj,
+  });
+}
+
+/**
+ * An object representing a scheduling policy.
+ */
+export interface SchedulingPolicyDetail {
+  /**
+   * The name of the scheduling policy.
+   */
+  name: string | undefined;
+
+  /**
+   * The Amazon Resource Name (ARN) of the scheduling policy. An example would be
+   * arn:aws:batch:us-east-1:123456789012:scheduling-policy/HighPriority.
+   */
+  arn: string | undefined;
+
+  /**
+   * The fair share policy for the scheduling policy.
+   */
+  fairsharePolicy?: FairsharePolicy;
+
+  /**
+   * The tags that you apply to the scheduling policy to help you categorize and organize your resources. Each tag
+   * consists of a key and an optional value. For more information, see Tagging Amazon Web Services Resources in Amazon Web Services General
+   * Reference.
+   */
+  tags?: { [key: string]: string };
+}
+
+export namespace SchedulingPolicyDetail {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: SchedulingPolicyDetail): any => ({
+    ...obj,
+  });
+}
+
+export interface DescribeSchedulingPoliciesResponse {
+  /**
+   * The list of scheduling policies.
+   */
+  schedulingPolicies?: SchedulingPolicyDetail[];
+}
+
+export namespace DescribeSchedulingPoliciesResponse {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: DescribeSchedulingPoliciesResponse): any => ({
+    ...obj,
+  });
+}
+
/**
 * A filter name and value pair that's used to return a more specific list of results from a ListJobs
* API operation.
The maximum number of results returned by ListSchedulingPolicies
in paginated output. When this
+ * parameter is used, ListSchedulingPolicies
only returns maxResults
results in a single page
+ * and a nextToken
response element. The remaining results of the initial request can be seen by sending
+ * another ListSchedulingPolicies
request with the returned nextToken
value. This value can be
+ * between 1 and 100. If this parameter isn't used, then
+ * ListSchedulingPolicies
returns up to 100 results and a nextToken
value
+ * if applicable.
The nextToken
value returned from a previous paginated ListSchedulingPolicies
request
+ * where maxResults
was used and the results exceeded the value of that parameter. Pagination continues
+ * from the end of the previous results that returned the nextToken
value. This value is null
+ * when there are no more results to return.
This token should be treated as an opaque identifier that's only used to + * retrieve the next items in a list and not for other programmatic purposes.
+ *An object containing the details of a scheduling policy returned in a ListSchedulingPolicy
+ * action.
Amazon Resource Name (ARN) of the scheduling policy.
+ */ + arn: string | undefined; +} + +export namespace SchedulingPolicyListingDetail { + /** + * @internal + */ + export const filterSensitiveLog = (obj: SchedulingPolicyListingDetail): any => ({ + ...obj, + }); +} + +export interface ListSchedulingPoliciesResponse { + /** + *A list of scheduling policies that match + * the request.
+ */ + schedulingPolicies?: SchedulingPolicyListingDetail[]; + + /** + *The nextToken
value to include in a future ListSchedulingPolicies
request. When the
+ * results of a ListSchedulingPolicies
request exceed maxResults
, this value can be used to
+ * retrieve the next page of results. This value is null
when there are no more results to return.
The Amazon Resource Name (ARN) that identifies the resource that tags are listed for. Batch resources that support tags are compute environments, jobs, job definitions, and job - * queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.
-   * The Amazon Resource Name (ARN) that identifies the resource that tags are listed for. Batch resources that support tags are compute environments, jobs, job definitions, and job
-   * queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.
+   * The Amazon Resource Name (ARN) that identifies the resource that tags are listed for. Batch resources that support tags are compute environments, jobs, job definitions, job queues,
+   * and scheduling policies. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.
+ * + *The minimum supported value is 0 and the maximum supported value is 9999.
+ */ + schedulingPriority?: number; + /** *An object with various properties specific to single-node container-based jobs. If the job definition's
* type
parameter is container
, then you must specify either containerProperties
@@ -3485,34 +3873,32 @@ export interface ContainerOverrides {
/**
* @deprecated
*
- *
This parameter indicates the number of vCPUs reserved for the container.It overrides the vcpus
- * parameter that's set in the job definition, but doesn't override any vCPU requirement specified in the
- * resourceRequirement
structure in the job definition. To override vCPU requirements that are specified
- * in the ResourceRequirement
structure in the job definition, ResourceRequirement
must be
+ *
This parameter is deprecated, use
+ * resourceRequirements
to override the vcpus
parameter that's set in the
+ * job definition. It's not supported for jobs
+ * that run on Fargate resources. For jobs run on EC2 resources, it overrides the vcpus
parameter set in
+ * the job definition, but doesn't override any vCPU requirement specified in the
+ * resourceRequirements
structure in the job definition. To override vCPU requirements that are specified
+ * in the resourceRequirements
structure in the job definition, resourceRequirements
must be
* specified in the SubmitJob
request, with type
set to VCPU
and
- * value
set to the new value.
This parameter maps to CpuShares
in the Create a container section of the
- * Docker Remote API and the --cpu-shares
option to docker run.
- * Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU.
This parameter is supported for jobs that run on EC2 resources, but isn't supported for jobs that run on
- * Fargate resources. For Fargate resources, you can only use resourceRequirement
. For EC2 resources,
- * you can use either this parameter or resourceRequirement
but not both.
value
set to the new value. For
+ * more information, see Can't override job definition
+ * resource requirements in the Batch User Guide.
*/
vcpus?: number;
/**
* @deprecated
*
- * This parameter indicates the amount of memory (in MiB) that's reserved for the job. It overrides the + *
This parameter is deprecated, use
+ * resourceRequirements
to override the memory requirements specified in the job definition. It's not
+ * supported for jobs that run on Fargate resources. For jobs run on EC2 resources, it overrides the
* memory
parameter set in the job definition, but doesn't override any memory requirement specified in
- * the ResourceRequirement
structure in the job definition. To override memory requirements that are
- * specified in the ResourceRequirement
structure in the job definition, ResourceRequirement
+ * the resourceRequirements
structure in the job definition. To override memory requirements that are
+ * specified in the resourceRequirements
structure in the job definition, resourceRequirements
* must be specified in the SubmitJob
request, with type
set to MEMORY
and
- * value
set to the new value.
This parameter is supported for jobs that run on EC2 resources, but isn't supported for jobs that run on Fargate
- * resources. For these resources, use resourceRequirement
instead.
value
set to the new value. For more information, see Can't override job definition
+ * resource requirements in the Batch User Guide.
*/
memory?: number;
@@ -3645,6 +4031,22 @@ export interface SubmitJobRequest {
*/
jobQueue: string | undefined;
+ /**
+ * The share identifier for the + * job.
+ */ + shareIdentifier?: string; + + /** + *The scheduling priority for the job. This + * will only affect jobs in job queues with a fair share policy. Jobs with a higher scheduling priority will be + * scheduled before jobs with a lower scheduling priority. This will override any scheduling priority in the job + * definition.
+ * + *The minimum supported value is 0 and the maximum supported value is 9999.
+ */ + schedulingPriorityOverride?: number; + /** *The array properties for the submitted job, such as the size of the array. The array size can be between 2 and * 10,000. If you specify array properties for a job, it becomes an array job. For more information, see Array Jobs in the @@ -3722,7 +4124,7 @@ export interface SubmitJobRequest { /** *
The tags that you apply to the job request to help you categorize and organize your resources. Each tag consists * of a key and an optional value. For more information, see Tagging Amazon Web Services Resources in Amazon Web Services General - * Reference.
+   * Reference.
   */
  tags?: { [key: string]: string };
}

@@ -3764,15 +4166,15 @@ export namespace SubmitJobResponse {

export interface TagResourceRequest {
  /**
-   * The Amazon Resource Name (ARN) of the resource that tags are added to. Batch resources that support tags are compute environments, jobs, job definitions, and job
-   * queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.
+   * The Amazon Resource Name (ARN) of the resource that tags are added to. Batch resources that support tags are compute environments, jobs, job definitions, job queues,
+   * and scheduling policies. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.
   */
  resourceArn: string | undefined;

  /**
   * The tags that you apply to the resource to help you categorize and organize your resources. Each tag consists of
   * a key and an optional value. For more information, see Tagging Amazon Web Services Resources in Amazon Web Services General
   * Reference.
   */
  tags: { [key: string]: string } | undefined;
}

@@ -3836,8 +4238,8 @@ export namespace TerminateJobResponse {

export interface UntagResourceRequest {
  /**
-   * The Amazon Resource Name (ARN) of the resource from which to delete tags. Batch resources that support tags are compute environments, jobs, job definitions, and job
-   * queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.
+   * The Amazon Resource Name (ARN) of the resource from which to delete tags. Batch resources that support tags are compute environments, jobs, job definitions, job queues,
+   * and scheduling policies. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.
   */
  resourceArn: string | undefined;

@@ -3950,6 +4352,15 @@ export interface UpdateComputeEnvironmentRequest {
   */
  state?: CEState | string;

+  /**
+   * The maximum number of vCPUs expected to be used for an unmanaged compute environment. This parameter should not
+   * be specified for a managed compute environment. This parameter is only used for fair share scheduling to reserve
+   * vCPU capacity for new share identifiers. If this parameter is not provided for a fair share job queue, no vCPU
+   * capacity will be reserved.
+   */
+  unmanagedvCpus?: number;
+
  /**
   * Details of the compute resources managed by the compute environment. Required for a managed compute environment.
   * For more information, see Compute Environments in the Batch User Guide.
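// Editor's illustrative sketch, not part of this diff: submitting a job to a fair share
// queue using the new shareIdentifier and schedulingPriorityOverride parameters on
// SubmitJob. Queue, definition, and identifier values are invented.
import { BatchClient, SubmitJobCommand } from "@aws-sdk/client-batch";

async function submitFairShareJob(batch: BatchClient): Promise<string | undefined> {
  const { jobId } = await batch.send(
    new SubmitJobCommand({
      jobName: "analytics-job",
      jobQueue: "fair-share-queue",
      jobDefinition: "fair-share-job",
      shareIdentifier: "UserA-1", // required when the queue has a fair share policy
      schedulingPriorityOverride: 500, // overrides the definition's schedulingPriority
      containerOverrides: {
        // Override resources through resourceRequirements, not the deprecated fields.
        resourceRequirements: [{ type: "VCPU", value: "2" }],
      },
    })
  );
  return jobId;
}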
@@ -4025,6 +4436,16 @@ export interface UpdateJobQueueRequest {
*/
state?: JQState | string;
+  /**
+   * The Amazon Resource Name (ARN) of the fair share scheduling policy. Once a job queue is created, the fair share
+   * scheduling policy can be replaced but not removed. The format is
+   * arn:Partition:batch:Region:Account:scheduling-policy/Name.
+   * For example, arn:aws:batch:us-west-2:012345678910:scheduling-policy/MySchedulingPolicy.
+   */
+  schedulingPolicyArn?: string;
+
+export interface UpdateSchedulingPolicyRequest {
+  /**
+   * The Amazon Resource Name (ARN) of the scheduling policy to update.
+   */
+  arn: string | undefined;
+
+  /**
+   * The fair share policy.
+   */
+  fairsharePolicy?: FairsharePolicy;
+}
* Available values:
@@ -585,7 +585,7 @@ export interface CreateMeetingWithAttendeesRequest {
ClientRequestToken?: string;
/**
- * The Region in which to create the meeting. Default: The Region in which to create the meeting. Amazon DynamoDB is a fully managed NoSQL database service that provides fast and
-predictable performance with seamless scalability. DynamoDB lets you offload the
-administrative burdens of operating and scaling a distributed database, so that you don't have
-to worry about hardware provisioning, setup and configuration, replication, software patching,
-or cluster scaling. With DynamoDB, you can create database tables that can store and retrieve any amount of
-data, and serve any level of request traffic. You can scale up or scale down your tables'
-throughput capacity without downtime or performance degradation, and use the AWS Management
-Console to monitor resource utilization and performance metrics. DynamoDB automatically spreads the data and traffic for your tables over a sufficient
-number of servers to handle your throughput and storage requirements, while maintaining
-consistent and fast performance. All of your data is stored on solid state disks (SSDs) and
-automatically replicated across multiple Availability Zones in an AWS region, providing
-built-in high availability and data durability. Amazon DynamoDB is a fully managed NoSQL database service that provides fast
+and predictable performance with seamless scalability. DynamoDB lets you
+offload the administrative burdens of operating and scaling a distributed database, so
+that you don't have to worry about hardware provisioning, setup and configuration,
+replication, software patching, or cluster scaling. With DynamoDB, you can create database tables that can store and retrieve
+any amount of data, and serve any level of request traffic. You can scale up or scale
+down your tables' throughput capacity without downtime or performance degradation, and
+use the Amazon Web Services Management Console to monitor resource utilization and performance
+metrics. DynamoDB automatically spreads the data and traffic for your tables over
+a sufficient number of servers to handle your throughput and storage requirements, while
+maintaining consistent and fast performance. All of your data is stored on solid state
+disks (SSDs) and automatically replicated across multiple Availability Zones in an
+Amazon Web Services Region, providing built-in high availability and data
+durability. Amazon DynamoDB is a fully managed NoSQL database service that provides fast
+ * and predictable performance with seamless scalability. DynamoDB lets you
+ * offload the administrative burdens of operating and scaling a distributed database, so
+ * that you don't have to worry about hardware provisioning, setup and configuration,
+ * replication, software patching, or cluster scaling. Amazon DynamoDB is a fully managed NoSQL database service that provides fast and
- * predictable performance with seamless scalability. DynamoDB lets you offload the
- * administrative burdens of operating and scaling a distributed database, so that you don't have
- * to worry about hardware provisioning, setup and configuration, replication, software patching,
- * or cluster scaling. With DynamoDB, you can create database tables that can store and retrieve
+ * any amount of data, and serve any level of request traffic. You can scale up or scale
+ * down your tables' throughput capacity without downtime or performance degradation, and
+ * use the Amazon Web Services Management Console to monitor resource utilization and performance
+ * metrics. With DynamoDB, you can create database tables that can store and retrieve any amount of
- * data, and serve any level of request traffic. You can scale up or scale down your tables'
- * throughput capacity without downtime or performance degradation, and use the AWS Management
- * Console to monitor resource utilization and performance metrics. DynamoDB automatically spreads the data and traffic for your tables over a sufficient
- * number of servers to handle your throughput and storage requirements, while maintaining
- * consistent and fast performance. All of your data is stored on solid state disks (SSDs) and
- * automatically replicated across multiple Availability Zones in an AWS region, providing
- * built-in high availability and data durability. DynamoDB automatically spreads the data and traffic for your tables over
+ * a sufficient number of servers to handle your throughput and storage requirements, while
+ * maintaining consistent and fast performance. All of your data is stored on solid state
+ * disks (SSDs) and automatically replicated across multiple Availability Zones in an
+ * Amazon Web Services Region, providing built-in high availability and data
+ * durability.
  /**
-   * This operation allows you to perform batch reads and writes on data stored in DynamoDB, using PartiQL.
+   * This operation allows you to perform batch reads or writes on data stored in DynamoDB,
+   * using PartiQL. The entire batch must consist of either read statements or write statements;
+   * you cannot mix both in one batch.
   */

  /**
-   * The BatchGetItem operation returns the attributes of one or more items from one or
-   * more tables. You identify requested items by primary key.
+   * The BatchGetItem operation returns the attributes of one or more items
+   * from one or more tables. You identify requested items by primary key.
   *
   * A single operation can retrieve up to 16 MB of data, which can contain as many as 100
   * items. If you request more than 100 items, BatchGetItem returns a
   * ValidationException with the message "Too many items requested for
   * the BatchGetItem call."
   *
   * For example, if you ask to retrieve 100 items, but each individual item is 300 KB in
   * size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns
   * an appropriate UnprocessedKeys value so you can get the next page of
   * results. If desired, your application can include its own logic to assemble the pages of
   * results into one dataset.
   *
   * If none of the items can be processed due to insufficient
   * provisioned throughput on all of the tables in the request, then
   * BatchGetItem returns a ProvisionedThroughputExceededException. If at least
   * one of the items is successfully processed, then BatchGetItem completes
   * successfully, while returning the keys of the unread items in UnprocessedKeys.
   *
-   * If DynamoDB returns any unprocessed items, you should retry the batch operation on those
-   * items. However, we strongly recommend that you use an exponential backoff algorithm.
-   * If you retry the batch operation immediately, the underlying read or write requests can
-   * still fail due to throttling on the individual tables. If you delay the batch operation
-   * using exponential backoff, the individual requests in the batch are much more likely to
-   * succeed. For more information, see Batch
-   * Operations and Error Handling in the Amazon DynamoDB Developer Guide.
+   * If DynamoDB returns any unprocessed items, you should retry the batch operation on
+   * those items. However, we strongly recommend that you use an exponential
+   * backoff algorithm. If you retry the batch operation immediately, the
+   * underlying read or write requests can still fail due to throttling on the individual
+   * tables. If you delay the batch operation using exponential backoff, the individual
+   * requests in the batch are much more likely to succeed. For more information, see Batch Operations and Error Handling in the Amazon DynamoDB
+   * Developer Guide.
   *
   * By default, BatchGetItem performs eventually consistent reads on every table in the
   * request. If you want strongly consistent reads instead, you can set ConsistentRead to
   * true for any or all tables.
   *
   * In order to minimize response latency, BatchGetItem retrieves items in parallel.
   *
-   * When designing your application, keep in mind that DynamoDB does not return items in any
-   * particular order. To help parse the response by item, include the primary key values for the
-   * items in your request in the ProjectionExpression parameter.
+   * When designing your application, keep in mind that DynamoDB does not return items in
+   * any particular order. To help parse the response by item, include the primary key values
+   * for the items in your request in the ProjectionExpression parameter.
   *
   * If a requested item does not exist, it is not returned in the result. Requests for
   * nonexistent items consume the minimum read capacity units according to the type of read.
   * For more information, see Working with Tables in the Amazon DynamoDB Developer
   * Guide.
   */
  /**
-   * The BatchWriteItem operation puts or deletes multiple items in one or more
-   * tables. A single call to BatchWriteItem can write up to 16 MB of data,
+   * The BatchWriteItem operation puts or deletes multiple items in one or
+   * more tables. A single call to BatchWriteItem can write up to 16 MB of data,
   * which can comprise as many as 25 put or delete requests. Individual items to be written
   * can be as large as 400 KB.
   *
-   * BatchWriteItem cannot update items. To update items, use the UpdateItem
-   * action.
+   * BatchWriteItem cannot update items. To update items, use the
+   * UpdateItem action.
   *
-   * The individual PutItem and DeleteItem operations specified in
-   * BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any
-   * requested operations fail because the table's provisioned throughput is exceeded or an
-   * internal processing failure occurs, the failed operations are returned in the
-   * UnprocessedItems response parameter. You can investigate and optionally resend the
-   * requests. Typically, you would call BatchWriteItem in a loop. Each iteration would
-   * check for unprocessed items and submit a new BatchWriteItem request with those
-   * unprocessed items until all items have been processed.
+   * The individual PutItem and DeleteItem operations specified
+   * in BatchWriteItem are atomic; however BatchWriteItem as a
+   * whole is not. If any requested operations fail because the table's provisioned
+   * throughput is exceeded or an internal processing failure occurs, the failed operations
+   * are returned in the UnprocessedItems response parameter. You can
+   * investigate and optionally resend the requests. Typically, you would call
+   * BatchWriteItem in a loop. Each iteration would check for unprocessed
+   * items and submit a new BatchWriteItem request with those unprocessed items
+   * until all items have been processed.
   *
   * If none of the items can be processed due to insufficient
   * provisioned throughput on all of the tables in the request, then
   * BatchWriteItem returns a ProvisionedThroughputExceededException.
   *
-   * If DynamoDB returns any unprocessed items, you should retry the batch operation on those
-   * items. However, we strongly recommend that you use an exponential backoff algorithm.
-   * If you retry the batch operation immediately, the underlying read or write requests can
-   * still fail due to throttling on the individual tables. If you delay the batch operation
-   * using exponential backoff, the individual requests in the batch are much more likely to
-   * succeed.
+   * If DynamoDB returns any unprocessed items, you should retry the batch operation on
+   * those items. However, we strongly recommend that you use an exponential
+   * backoff algorithm. If you retry the batch operation immediately, the
+   * underlying read or write requests can still fail due to throttling on the individual
+   * tables. If you delay the batch operation using exponential backoff, the individual
+   * requests in the batch are much more likely to succeed.
   * For more information, see Batch Operations and Error Handling in the Amazon DynamoDB
   * Developer Guide.
   *
   * With BatchWriteItem, you can efficiently write or delete large amounts of
   * data, such as from Amazon EMR, or copy data from another database into DynamoDB. In
   * order to improve performance with these large-scale operations,
   * BatchWriteItem does not behave in the same way as individual
   * PutItem and DeleteItem calls would. For example, you
   * cannot specify conditions on individual put and delete requests, and
   * BatchWriteItem does not return deleted items in the response.
   *
-   * If you use a programming language that supports concurrency, you can use
-   * threads to write items in parallel. Your application must include the necessary logic to
-   * manage the threads. With languages that don't support threading, you must update
-   * or delete the specified items one at a time. In both situations, BatchWriteItem
-   * performs the specified put and delete operations in
-   * parallel, giving you the power of the thread pool approach without having to introduce
-   * complexity into your application.
+   * If you use a programming language that supports concurrency, you can use threads to
+   * write items in parallel. Your application must include the necessary logic to manage the
+   * threads. With languages that don't support threading, you must update or delete the
+   * specified items one at a time. In both situations, BatchWriteItem performs
+   * the specified put and delete operations in parallel, giving you the power of the thread
+   * pool approach without having to introduce complexity into your application.
   *
-   * Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.
+   * Parallel processing reduces latency, but each specified put and delete request
+   * consumes the same number of write capacity units whether it is processed in parallel or
+   * not. Delete operations on nonexistent items consume one write capacity unit.
   *
-   * If one or more of the following is true, DynamoDB rejects the entire batch write operation:
+   * If one or more of the following is true, DynamoDB rejects the entire batch write
+   * operation:
   *
-   *   One or more tables specified in the BatchWriteItem request does not exist.
+   *   One or more tables specified in the BatchWriteItem request does
+   *   not exist.
   *
-   *   Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema.
+   *   Primary key attributes specified on an item in the request do not match those
+   *   in the corresponding table's primary key schema.
   *
-   *   You try to perform multiple operations on the same item in the same BatchWriteItem
-   *   request. For example, you cannot put and delete the same item in the same
-   *   BatchWriteItem request.
+   *   You try to perform multiple operations on the same item in the same
+   *   BatchWriteItem request. For example, you cannot put and delete
+   *   the same item in the same BatchWriteItem request.
   *
-   *   Your request contains at least two items with identical hash and range keys (which essentially is two put operations).
+   *   Your request contains at least two items with identical hash and range keys
+   *   (which essentially is two put operations).
   *
-   *   There are more than 25 requests in the batch.
+   *   There are more than 25 requests in the batch.
   *
-   *   Any individual item in a batch exceeds 400 KB.
+   *   Any individual item in a batch exceeds 400 KB.
   *
-   *   The total request size exceeds 16 MB.
+   *   The total request size exceeds 16 MB.
   */
+ * Each time you create an on-demand backup, the entire table data is backed up. There
* is no limit to the number of on-demand backups that can be taken. When you create an on-demand backup, a time marker of the request is cataloged, and
+ * When you create an on-demand backup, a time marker of the request is cataloged, and
* the backup is created asynchronously, by applying all changes until the time of the
* request to the last full table snapshot. Backup requests are processed instantaneously
* and become available for restore within minutes. You can call All backups in DynamoDB work without consuming any provisioned throughput on the table. If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed
- * to contain all data committed to the table up to 14:24:00, and data committed after
+ * You can call All backups in DynamoDB work without consuming any provisioned throughput on the
+ * table. If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed to
+ * contain all data committed to the table up to 14:24:00, and data committed after
* 14:26:00 will not be. The backup might contain data modifications made between 14:24:00
* and 14:26:00. On-demand backup does not support causal consistency.
- * Along with data, the following are also included on the backups:
- * Along with data, the following are also included on the backups: Global secondary indexes (GSIs) Global secondary indexes (GSIs) Local secondary indexes (LSIs) Local secondary indexes (LSIs) Streams Streams Provisioned read and write capacity Provisioned read and write capacity Creates a global table from an existing table. A global table creates a replication
* relationship between two or more DynamoDB tables with the same table name in the
* provided Regions. This operation only applies to Version 2017.11.29 of global tables. This operation only applies to Version
+ * 2017.11.29 of global tables. If you want to add a new replica table to a global table, each of the following conditions
- * must be true: If you want to add a new replica table to a global table, each of the following
+ * conditions must be true: The table must have the same primary key as all of the other replicas. The table must have the same primary key as all of the other replicas. The table must have the same name as all of the other replicas. The table must have the same name as all of the other replicas. The table must have DynamoDB Streams enabled, with the stream containing both the new and the old
- * images of the item. The table must have DynamoDB Streams enabled, with the stream containing both
+ * the new and the old images of the item. None of the replica tables in the global table can contain any data. None of the replica tables in the global table can contain any data.
- * If global secondary indexes are specified, then the following conditions must also be met:
- * If global secondary indexes are specified, then the following conditions must also be
+ * met:
- * The global secondary indexes must have the same name.
- * The global secondary indexes must have the same name.
- * The global secondary indexes must have the same hash key and sort key (if present).
- * The global secondary indexes must have the same hash key and sort key (if
+ * present).
- * If local secondary indexes are specified, then the following conditions must also be met:
- * If local secondary indexes are specified, then the following conditions must also be
+ * met:
- * The local secondary indexes must have the same name.
- * The local secondary indexes must have the same name.
- * The local secondary indexes must have the same hash key and sort key (if present).
- * The local secondary indexes must have the same hash key and sort key (if
+ * present).
- * Write capacity settings should be set consistently across your replica tables and
- * secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write
- * capacity settings for all of your global tables replicas and indexes.
- *
- * If you prefer to manage write capacity settings manually, you should provision equal
- * replicated write capacity units to your replica tables. You should also provision
- * equal replicated write capacity units to matching secondary indexes across
- * your global table.
- * Write capacity settings should be set consistently across your replica tables and
+ * secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the
+ * write capacity settings for all of your global tables replicas and indexes. If you prefer to manage write capacity settings manually, you should provision
+ * equal replicated write capacity units to your replica tables. You should also
+ * provision equal replicated write capacity units to matching secondary indexes across
+ * your global table. The
- * You can optionally define secondary indexes on the new table, as part of the You can use the The
+ * You can optionally define secondary indexes on the new table, as part of the
+ * You can use the Deletes an existing backup of a table. You can call You can call Deletes a single item in a table by primary key. You can perform a conditional delete operation that deletes the item if it exists, or if it has an expected attribute value. In addition to deleting an item, you can also return the item's attribute values in the same
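// Editor's illustrative sketch, not part of this diff: because CreateTable is
// asynchronous, poll DescribeTable until the new table reaches ACTIVE before using it.
import {
  CreateTableCommand,
  DescribeTableCommand,
  DynamoDBClient,
} from "@aws-sdk/client-dynamodb";

async function createTableAndWait(client: DynamoDBClient, name: string): Promise<void> {
  await client.send(
    new CreateTableCommand({
      TableName: name,
      AttributeDefinitions: [{ AttributeName: "pk", AttributeType: "S" }],
      KeySchema: [{ AttributeName: "pk", KeyType: "HASH" }],
      BillingMode: "PAY_PER_REQUEST",
    })
  );
  for (;;) {
    const { Table } = await client.send(new DescribeTableCommand({ TableName: name }));
    if (Table?.TableStatus === "ACTIVE") return; // table is ready for reads and writes
    await new Promise((resolve) => setTimeout(resolve, 2000));
  }
}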
- * operation, using the Unless you specify conditions, the Conditional deletes are useful for deleting items only if specific conditions are met. If those conditions are met, DynamoDB performs the delete. Otherwise, the item is not deleted. Deletes a single item in a table by primary key. You can perform a conditional delete
+ * operation that deletes the item if it exists, or if it has an expected attribute
+ * value. In addition to deleting an item, you can also return the item's attribute values in
+ * the same operation, using the Unless you specify conditions, the Conditional deletes are useful for deleting items only if specific conditions are met.
+ * If those conditions are met, DynamoDB performs the delete. Otherwise, the item is not
+ * deleted. The DynamoDB might continue to accept data read and write operations, such as When you delete a table, any indexes on that table are also deleted. If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes
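// Editor's illustrative sketch, not part of this diff: a conditional delete as described
// above, removing the item only when its status attribute is "archived" and returning
// the deleted attributes. Table and key names are invented.
import { DeleteItemCommand, DynamoDBClient } from "@aws-sdk/client-dynamodb";

async function deleteIfArchived(client: DynamoDBClient) {
  const resp = await client.send(
    new DeleteItemCommand({
      TableName: "Sessions",
      Key: { pk: { S: "session#123" } },
      ConditionExpression: "#s = :archived", // throws ConditionalCheckFailedException otherwise
      ExpressionAttributeNames: { "#s": "status" },
      ExpressionAttributeValues: { ":archived": { S: "archived" } },
      ReturnValues: "ALL_OLD",
    })
  );
  return resp.Attributes;
}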
- * into the aws:Partition:batch:Region:Account:scheduling-policy/Name
+ *
.
+ * For example,
+ * aws:aws:batch:us-west-2:012345678910:scheduling-policy/MySchedulingPolicy
.priority
parameter) are evaluated first when associated with the same compute environment. Priority is
@@ -4078,3 +4499,36 @@ export namespace UpdateJobQueueResponse {
...obj,
});
}
+
+export interface UpdateSchedulingPolicyRequest {
+ /**
+ * us-east-1
.us-east-1
.BatchGetItem
operation returns the attributes of one or more items from one or
- * more tables. You identify requested items by primary key.BatchGetItem
operation returns the attributes of one or more items
+ * from one or more tables. You identify requested items by primary key.BatchGetItem
returns a partial result if the response size limit is
* exceeded, the table's provisioned throughput is exceeded, or an internal processing
* failure occurs. If a partial result is returned, the operation returns a value for
* UnprocessedKeys
. You can use this value to retry the operation starting
* with the next item to get.BatchGetItem
returns a
* ValidationException
with the message "Too many items requested for
* the BatchGetItem call."UnprocessedKeys
value so you can get the next page of
* results. If desired, your application can include its own logic to assemble the pages of
* results into one dataset.BatchGetItem
returns a
* ProvisionedThroughputExceededException
. If at least
* one of the items is successfully processed, then
* BatchGetItem
completes successfully, while returning the keys of the
* unread items in UnprocessedKeys
.BatchGetItem
performs eventually consistent reads on every table in the
- * request. If you want strongly consistent reads instead, you can set ConsistentRead
to
- * true
for any or all tables.BatchGetItem
retrieves items in parallel.ProjectionExpression
parameter.BatchGetItem
performs eventually consistent reads on every
+ * table in the request. If you want strongly consistent reads instead, you can set
+ * ConsistentRead
to true
for any or all tables.BatchGetItem
retrieves items in
+ * parallel.ProjectionExpression
parameter.BatchWriteItem
operation puts or deletes multiple items in one or more
- * tables. A single call to BatchWriteItem
can write up to 16 MB of data,
+ * BatchWriteItem
operation puts or deletes multiple items in one or
+ * more tables. A single call to BatchWriteItem
can write up to 16 MB of data,
* which can comprise as many as 25 put or delete requests. Individual items to be written
* can be as large as 400 KB.BatchWriteItem
cannot update items. To update items, use the UpdateItem
- * action.PutItem
and DeleteItem
operations specified in
- * BatchWriteItem
are atomic; however BatchWriteItem
as a whole is not. If any
- * requested operations fail because the table's provisioned throughput is exceeded or an
- * internal processing failure occurs, the failed operations are returned in the
- * UnprocessedItems
response parameter. You can investigate and optionally resend the
- * requests. Typically, you would call BatchWriteItem
in a loop. Each iteration would
- * check for unprocessed items and submit a new BatchWriteItem
request with those
- * unprocessed items until all items have been processed.BatchWriteItem
cannot update items. To update items, use the
+ * UpdateItem
action.PutItem
and DeleteItem
operations specified
+ * in BatchWriteItem
are atomic; however BatchWriteItem
as a
+ * whole is not. If any requested operations fail because the table's provisioned
+ * throughput is exceeded or an internal processing failure occurs, the failed operations
+ * are returned in the UnprocessedItems
response parameter. You can
+ * investigate and optionally resend the requests. Typically, you would call
+ * BatchWriteItem
in a loop. Each iteration would check for unprocessed
+ * items and submit a new BatchWriteItem
request with those unprocessed items
+ * until all items have been processed.BatchWriteItem
returns a
* ProvisionedThroughputExceededException
.BatchWriteItem
, you can efficiently write or delete large amounts of
+ * BatchWriteItem
, you can efficiently write or delete large amounts of
* data, such as from Amazon EMR, or copy data from another database into DynamoDB. In
* order to improve performance with these large-scale operations,
* BatchWriteItem
does not behave in the same way as individual
* PutItem
and DeleteItem
calls would. For example, you
* cannot specify conditions on individual put and delete requests, and
* BatchWriteItem
does not return deleted items in the response.BatchWriteItem
- * performs the specified put and delete operations in
- * parallel, giving you the power of the thread pool approach without having to introduce
- * complexity into your application.
+ *
BatchWriteItem
performs
+ * the specified put and delete operations in parallel, giving you the power of the thread
+ * pool approach without having to introduce complexity into your application.
*
*/
@@ -438,34 +447,34 @@ export class DynamoDB extends DynamoDBClient {
/**
* BatchWriteItem
request does not exist.BatchWriteItem
request does
+ * not exist.BatchWriteItem
- * request. For example, you cannot put and delete the same item in the same
- * BatchWriteItem
request. BatchWriteItem
request. For example, you cannot put and delete
+ * the same item in the same BatchWriteItem
request. CreateBackup
at a maximum rate of 50 times per second.CreateBackup
at a maximum rate of 50 times per
+ * second.
+ *
*
*/
public createBackup(
@@ -498,71 +507,60 @@ export class DynamoDB extends DynamoDBClient {
*
+ *
*
- *
*
- *
*
*
- * CreateTable
operation adds a new table to your account. In an AWS
- * account, table names must be unique within each Region. That is, you can have two tables
- * with same name if you create the tables in different Regions.CreateTable
is an asynchronous operation. Upon receiving a CreateTable
request,
- * DynamoDB immediately returns a response with a TableStatus
of CREATING
. After
- * the table is created, DynamoDB sets the TableStatus
to ACTIVE
. You can
- * perform read and write operations only on an ACTIVE
table. CreateTable
- * operation. If you want to create multiple tables with secondary indexes on them, you must create the
- * tables sequentially. Only one table with secondary indexes can be in the CREATING
state at
- * any given time.DescribeTable
action to check the table status.CreateTable
operation adds a new table to your account. In an Amazon Web Services account, table names must be unique within each Region. That is, you can
+ * have two tables with same name if you create the tables in different Regions.CreateTable
is an asynchronous operation. Upon receiving a
+ * CreateTable
request, DynamoDB immediately returns a response with a
+ * TableStatus
of CREATING
. After the table is created,
+ * DynamoDB sets the TableStatus
to ACTIVE
. You can perform read
+ * and write operations only on an ACTIVE
table. CreateTable
operation. If you want to create multiple tables with
+ * secondary indexes on them, you must create the tables sequentially. Only one table with
+ * secondary indexes can be in the CREATING
state at any given time.DescribeTable
action to check the table status.DeleteBackup
at a maximum rate of 10 times per second.DeleteBackup
at a maximum rate of 10 times per
+ * second.ReturnValues
parameter.DeleteItem
is an idempotent operation; running it
- * multiple times on the same item or attribute does not result in an error response.ReturnValues
parameter.DeleteItem
is an idempotent operation;
+ * running it multiple times on the same item or attribute does not
+ * result in an error response.DeleteTable
operation deletes a table and all of its items. After a
- * DeleteTable
request, the specified table is in the DELETING
state until
- * DynamoDB completes the deletion. If the table is in the ACTIVE
state, you can delete
- * it. If a table is in CREATING
or UPDATING
states, then DynamoDB returns
- * a ResourceInUseException
. If the specified table does not exist, DynamoDB returns a
- * ResourceNotFoundException
. If table is already in the DELETING
state, no
- * error is returned. GetItem
and
- * PutItem
, on a table in the DELETING
state until the table deletion is
- * complete.DISABLED
state, and the stream is automatically deleted after 24 hours.DeleteTable
request, the specified table is in the
+ * DELETING
state until DynamoDB completes the deletion. If the table is
+ * in the ACTIVE
state, you can delete it. If a table is in
+ * CREATING
or UPDATING
states, then DynamoDB returns a
+ * ResourceInUseException
. If the specified table does not exist, DynamoDB
+ * returns a ResourceNotFoundException
. If table is already in the
+ * DELETING
state, no error is returned.
DynamoDB might continue to accept data read and write operations, such as
+ * GetItem
and PutItem
, on a table in the
+ * DELETING
state until the table deletion is complete.
When you delete a table, any indexes on that table are also deleted.
+ *If you have DynamoDB Streams enabled on the table, then the corresponding stream on
+ * that table goes into the DISABLED
state, and the stream is automatically
+ * deleted after 24 hours.
Use the DescribeTable
action to check the status of the table.
Use the DescribeTable
action to check the status of the table.
Describes an existing backup of a table.
- *You can call DescribeBackup
at a maximum rate of 10 times per second.
You can call DescribeBackup
at a maximum rate of 10 times per
+ * second.
Checks the status of continuous backups and point in time recovery on the specified table.
- * Continuous backups are ENABLED
on all tables at table creation.
- * If point in time recovery is enabled, PointInTimeRecoveryStatus
will be set to ENABLED.
After continuous backups and point in time recovery are enabled, you can restore to any
- * point in time within EarliestRestorableDateTime
and
+ *
Checks the status of continuous backups and point in time recovery on the specified
+ * table. Continuous backups are ENABLED
on all tables at table creation. If
+ * point in time recovery is enabled, PointInTimeRecoveryStatus
will be set to
+ * ENABLED.
After continuous backups and point in time recovery are enabled, you can restore to
+ * any point in time within EarliestRestorableDateTime
and
* LatestRestorableDateTime
.
- * LatestRestorableDateTime
is typically 5 minutes before the current time. You can restore your table to any point
- * in time during the last 35 days.
- *
You can call DescribeContinuousBackups
at a maximum rate of 10 times per second.
+ * LatestRestorableDateTime
is typically 5 minutes before the current time.
+ * You can restore your table to any point in time during the last 35 days.
You can call DescribeContinuousBackups
at a maximum rate of 10 times per
+ * second.
Returns information about contributor insights, for a given table or global secondary index.
+ *Returns information about contributor insights, for a given table or global secondary + * index.
*/ public describeContributorInsights( args: DescribeContributorInsightsCommandInput, @@ -907,10 +916,11 @@ export class DynamoDB extends DynamoDBClient { /** *Returns information about the specified global table.
- *This operation only applies to Version 2017.11.29 of global tables. - * If you are using global tables Version 2019.11.21 you can use DescribeTable instead.
- *This operation only applies to Version + * 2017.11.29 of global tables. If you are using global tables Version + * 2019.11.21 you can use DescribeTable instead.
+ *Describes Region-specific settings for a global table.
- *This operation only applies to Version 2017.11.29 of global tables.
- *This operation only applies to Version + * 2017.11.29 of global tables.
+ *Returns the current provisioned-capacity quotas for your AWS account in a Region, both - * for the Region as a whole and for any one DynamoDB table that you create there.
- *When you establish an AWS account, the account has initial quotas on the maximum read - * capacity units and write capacity units that you can provision across all of your - * DynamoDB tables in a given Region. Also, there are per-table quotas that apply when you - * create a table there. For more information, see Service, Account, and Table - * Quotas page in the Amazon DynamoDB Developer - * Guide.
+ *Returns the current provisioned-capacity quotas for your Amazon Web Services account in + * a Region, both for the Region as a whole and for any one DynamoDB table that you create + * there.
+ *When you establish an Amazon Web Services account, the account has initial quotas on + * the maximum read capacity units and write capacity units that you can provision across + * all of your DynamoDB tables in a given Region. Also, there are per-table + * quotas that apply when you create a table there. For more information, see Service, + * Account, and Table Quotas page in the Amazon DynamoDB + * Developer Guide.
* - *Although you can increase these quotas by filing a case at AWS Support Center, obtaining the increase is not
- * instantaneous. The DescribeLimits
action lets you write code to compare the
- * capacity you are currently using to those quotas imposed by your account so that you
- * have enough time to apply for an increase before you hit a quota.
Although you can increase these quotas by filing a case at Amazon Web Services Support Center, obtaining the
+ * increase is not instantaneous. The DescribeLimits
action lets you write
+ * code to compare the capacity you are currently using to those quotas imposed by your
+ * account so that you have enough time to apply for an increase before you hit a
+ * quota.
For example, you could use one of the AWS SDKs to do the following:
+ *For example, you could use one of the Amazon Web Services SDKs to do the + * following:
* - *Call DescribeLimits
for a particular Region to obtain your current
- * account quotas on provisioned capacity there.
Call DescribeLimits
for a particular Region to obtain your
+ * current account quotas on provisioned capacity there.
Create a variable to hold the aggregate read capacity units provisioned for all - * your tables in that Region, and one to hold the aggregate write capacity units. - * Zero them both.
+ *Create a variable to hold the aggregate read capacity units provisioned for + * all your tables in that Region, and one to hold the aggregate write capacity + * units. Zero them both.
*Call ListTables
to obtain a list of all your DynamoDB tables.
Call ListTables
to obtain a list of all your DynamoDB
+ * tables.
For each table name listed by ListTables
, do the following:
For each table name listed by ListTables
, do the
+ * following:
Call DescribeTable
with the table name.
Call DescribeTable
with the table name.
Use the data returned by DescribeTable
to add the read capacity units and write capacity
- * units provisioned for the table itself to your variables.
Use the data returned by DescribeTable
to add the read
+ * capacity units and write capacity units provisioned for the table itself
+ * to your variables.
If the table has one or more global secondary indexes (GSIs), loop over these GSIs and add their provisioned capacity values to your variables as well.
- *If the table has one or more global secondary indexes (GSIs), loop + * over these GSIs and add their provisioned capacity values to your + * variables as well.
+ * * * *Report the account quotas for that Region returned by DescribeLimits
, along with
- * the total current provisioned capacity levels you have calculated.
Report the account quotas for that Region returned by
+ * DescribeLimits
, along with the total current provisioned
+ * capacity levels you have calculated.
This will let you see whether you are getting close to your account-level quotas.
- *The per-table quotas apply only when you are creating a new table. They restrict the sum - * of the provisioned capacity of the new table itself and all its global secondary + *
This will let you see whether you are getting close to your account-level + * quotas.
+ *The per-table quotas apply only when you are creating a new table. They restrict the + * sum of the provisioned capacity of the new table itself and all its global secondary * indexes.
- *For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned + *
For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned * capacity extremely rapidly, but the only quota that applies is that the aggregate * provisioned capacity over all your tables and GSIs cannot exceed either of the * per-account quotas.
- *
- * DescribeLimits
should only be called periodically. You can expect throttling
- * errors if you call it more than once in a minute.
The DescribeLimits
Request element has no content.
DescribeLimits
should only be called periodically. You can expect
+ * throttling errors if you call it more than once in a minute.
+ * The DescribeLimits
Request element has no content.
Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table.
- *If you issue a DescribeTable
request immediately after a CreateTable
request, DynamoDB might
- * return a ResourceNotFoundException
. This is because DescribeTable
uses an eventually
- * consistent query, and the metadata for your table might not be available at that moment.
- * Wait for a few seconds, and then try the DescribeTable
request again.
Returns information about the table, including the current status of the table, when + * it was created, the primary key schema, and any indexes on the table.
+ *If you issue a DescribeTable
request immediately after a
+ * CreateTable
request, DynamoDB might return a
+ * ResourceNotFoundException
. This is because
+ * DescribeTable
uses an eventually consistent query, and the metadata
+ * for your table might not be available at that moment. Wait for a few seconds, and
+ * then try the DescribeTable
request again.
Describes auto scaling settings across replicas of the global table at once.
- *This operation only applies to Version 2019.11.21 of global tables.
- *This operation only applies to Version + * 2019.11.21 of global tables.
+ *Starts table data replication to the specified Kinesis data stream at a timestamp chosen - * during the enable workflow. If this operation doesn't return results immediately, use - * DescribeKinesisStreamingDestination to check if streaming to the Kinesis data stream is - * ACTIVE.
+ *Starts table data replication to the specified Kinesis data stream at a timestamp + * chosen during the enable workflow. If this operation doesn't return results immediately, + * use DescribeKinesisStreamingDestination to check if streaming to the Kinesis data stream + * is ACTIVE.
*/ public enableKinesisStreamingDestination( args: EnableKinesisStreamingDestinationCommandInput, @@ -1277,9 +1302,8 @@ export class DynamoDB extends DynamoDBClient { } /** - *- * This operation allows you to perform reads and singleton writes on data stored in DynamoDB, using PartiQL. - *
+ *This operation allows you to perform reads and singleton writes on data stored in + * DynamoDB, using PartiQL.
*/ public executeStatement( args: ExecuteStatementCommandInput, @@ -1311,9 +1335,14 @@ export class DynamoDB extends DynamoDBClient { } /** - *- * This operation allows you to perform transactional reads or writes on data stored in DynamoDB, using PartiQL. - *
+ *This operation allows you to perform transactional reads or writes on data stored in + * DynamoDB, using PartiQL.
+ *The entire transaction must consist of either read statements or write statements,
+ * you cannot mix both in one transaction. The EXISTS function is an exception and can
+ * be used to check the condition of specific attributes of the item in a similar
+ * manner to ConditionCheck
in the TransactWriteItems API.
The GetItem
operation returns a set of attributes for the item with the given primary
- * key. If there is no matching item, GetItem
does not return any data and there will be no Item
element in the response.
- * GetItem
provides an eventually consistent read by default. If your application
- * requires a strongly consistent read, set ConsistentRead
to true
. Although
- * a strongly consistent read might take more time than an eventually consistent read, it always
- * returns the last updated value.
The GetItem
operation returns a set of attributes for the item with the
+ * given primary key. If there is no matching item, GetItem
does not return
+ * any data and there will be no Item
element in the response.
+ * GetItem
provides an eventually consistent read by default. If your
+ * application requires a strongly consistent read, set ConsistentRead
to
+ * true
. Although a strongly consistent read might take more time than an
+ * eventually consistent read, it always returns the last updated value.
List backups associated with an AWS account. To list backups for a given table, specify
- * TableName
. ListBackups
returns a paginated list of results
- * with at most 1 MB worth of items in a page. You can also specify a maximum number of
- * entries to be returned in a page.
In the request, start time is inclusive, but end time is exclusive. Note that these + *
List backups associated with an Amazon Web Services account. To list backups for a
+ * given table, specify TableName
. ListBackups
returns a
+ * paginated list of results with at most 1 MB worth of items in a page. You can also
+ * specify a maximum number of entries to be returned in a page.
In the request, start time is inclusive, but end time is exclusive. Note that these * boundaries are for the time at which the original backup was requested.
- *You can call ListBackups
a maximum of five times per second.
You can call ListBackups
a maximum of five times per second.
Returns a list of ContributorInsightsSummary for a table and all its global secondary indexes.
+ *Returns a list of ContributorInsightsSummary for a table and all its global secondary + * indexes.
*/ public listContributorInsights( args: ListContributorInsightsCommandInput, @@ -1502,9 +1533,10 @@ export class DynamoDB extends DynamoDBClient { /** *Lists all global tables that have a replica in the specified Region.
- *This operation only applies to Version 2017.11.29 of global tables.
- *This operation only applies to Version + * 2017.11.29 of global tables.
+ *Returns an array of table names associated with the current account and endpoint. The output
- * from ListTables
is paginated, with each page returning a maximum of 100 table
- * names.
Returns an array of table names associated with the current account and endpoint. The
+ * output from ListTables
is paginated, with each page returning a maximum of
+ * 100 table names.
List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource up to 10 times per second, per account.
- *For an overview on tagging DynamoDB resources, see - * Tagging for DynamoDB - * in the Amazon DynamoDB Developer Guide.
+ *List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource up to 10 + * times per second, per account.
+ *For an overview on tagging DynamoDB resources, see Tagging for DynamoDB + * in the Amazon DynamoDB Developer Guide.
*/ public listTagsOfResource( args: ListTagsOfResourceCommandInput, @@ -1599,74 +1631,81 @@ export class DynamoDB extends DynamoDBClient { } /** - *Creates a new item, or replaces an old item with a new item. If an item that has the same primary key as the new item already exists in the specified table, the new item completely replaces the existing item. You can perform a conditional put operation (add a new item if one with the specified primary key doesn't exist), or replace an existing item if it has certain attribute values. You can return the item's attribute values in the same operation, using the ReturnValues
parameter.
Creates a new item, or replaces an old item with a new item. If an item that has the
+ * same primary key as the new item already exists in the specified table, the new item
+ * completely replaces the existing item. You can perform a conditional put operation (add
+ * a new item if one with the specified primary key doesn't exist), or replace an existing
+ * item if it has certain attribute values. You can return the item's attribute values in
+ * the same operation, using the ReturnValues
parameter.
This topic provides general information about the PutItem
API.
For information on how to call the PutItem
API using the AWS SDK in specific languages, see the following:
For information on how to call the PutItem
API using the Amazon Web Services SDK in specific languages, see the following:
- * PutItem in the AWS SDK for .NET + *
- *- * PutItem in the AWS SDK for C++ + *
- *- * PutItem in the AWS SDK for Go + *
- *- * PutItem in the AWS SDK for Java + *
- *+ *
* - * PutItem in the AWS SDK for Python + * PutItem in the SDK for Python (Boto) *
- *When you add an item, the primary key attributes are the only required attributes. + *
When you add an item, the primary key attributes are the only required attributes. * Attribute values cannot be null.
- *Empty String and Binary attribute values are allowed. Attribute values of type String and Binary must have a length greater than zero if the attribute is used as a key attribute for a table or index. - * Set type attributes cannot be empty.
- *Invalid Requests with empty values will
- * be rejected with a ValidationException
exception.
To prevent a new item from replacing an existing item, use a conditional expression
- * that contains the attribute_not_exists
function with the name of the
- * attribute being used as the partition key for the table. Since every record must contain
- * that attribute, the attribute_not_exists
function will only succeed if
- * no matching item exists.
For more information about PutItem
, see Working with Items in the Amazon DynamoDB Developer Guide.
Empty String and Binary attribute values are allowed. Attribute values of type String + * and Binary must have a length greater than zero if the attribute is used as a key + * attribute for a table or index. Set type attributes cannot be empty.
+ *Invalid Requests with empty values will be rejected with a
+ * ValidationException
exception.
To prevent a new item from replacing an existing item, use a conditional
+ * expression that contains the attribute_not_exists
function with the
+ * name of the attribute being used as the partition key for the table. Since every
+ * record must contain that attribute, the attribute_not_exists
function
+ * will only succeed if no matching item exists.
For more information about PutItem
, see Working with
+ * Items in the Amazon DynamoDB Developer Guide.
The Query
operation finds items based on primary key values.
- * You can query any table or secondary index that has a composite primary key (a partition
- * key and a sort key).
- *
Use the KeyConditionExpression
parameter to provide a specific value
- * for the partition key. The Query
operation will return all of the items
- * from the table or index with that partition key value. You can optionally narrow the
- * scope of the Query
operation by specifying a sort key value and a
- * comparison operator in KeyConditionExpression
. To further refine the Query
results,
- * you can optionally provide a FilterExpression
. A FilterExpression
determines which items
- * within the results should be returned to you. All of the other results are discarded.
- *
- * A Query
operation always returns a result set. If no matching items are found,
- * the result set will be empty. Queries that do not return results consume the minimum number of
- * read capacity units for that type of read operation.
- *
You must provide the name of the partition key attribute and a single value for that
+ * attribute. Query
returns all items with that partition key value.
+ * Optionally, you can provide a sort key attribute and use a comparison operator to refine
+ * the search results.
Use the KeyConditionExpression
parameter to provide a specific value for
+ * the partition key. The Query
operation will return all of the items from
+ * the table or index with that partition key value. You can optionally narrow the scope of
+ * the Query
operation by specifying a sort key value and a comparison
+ * operator in KeyConditionExpression
. To further refine the
+ * Query
results, you can optionally provide a
+ * FilterExpression
. A FilterExpression
determines which
+ * items within the results should be returned to you. All of the other results are
+ * discarded.
A Query
operation always returns a result set. If no matching items are
+ * found, the result set will be empty. Queries that do not return results consume the
+ * minimum number of read capacity units for that type of read operation.
- * DynamoDB calculates the number of read capacity units consumed based on item size,
- * not on the amount of data that is returned to an application. The number of capacity
- * units consumed will be the same whether you request all of the attributes (the default behavior)
- * or just some of them (using a projection expression). The number will also be the same
- * whether or not you use a FilterExpression
.
+ *
DynamoDB calculates the number of read capacity units consumed based on item
+ * size, not on the amount of data that is returned to an application. The number of
+ * capacity units consumed will be the same whether you request all of the attributes
+ * (the default behavior) or just some of them (using a projection expression). The
+ * number will also be the same whether or not you use a FilterExpression
.
*
- * Query
results are always sorted by the sort key value. If the data type of the sort key is Number,
- * the results are returned in numeric order; otherwise, the results are returned in order of UTF-8 bytes.
- * By default, the sort order is ascending. To reverse the order, set the ScanIndexForward
parameter
- * to false.
- *
A single Query
operation will read up to the maximum number of items
- * set (if using the Limit
parameter) or a maximum of 1 MB of data and then
- * apply any filtering to the results using FilterExpression
. If
+ * Query
results are always sorted by the sort key value. If the data type of
+ * the sort key is Number, the results are returned in numeric order; otherwise, the
+ * results are returned in order of UTF-8 bytes. By default, the sort order is ascending.
+ * To reverse the order, set the ScanIndexForward
parameter to false.
A single Query
operation will read up to the maximum number of items set
+ * (if using the Limit
parameter) or a maximum of 1 MB of data and then apply
+ * any filtering to the results using FilterExpression
. If
* LastEvaluatedKey
is present in the response, you will need to paginate
* the result set. For more information, see Paginating
* the Results in the Amazon DynamoDB Developer Guide.
* FilterExpression
is applied after a Query
finishes, but before
- * the results are returned.
- * A FilterExpression
cannot contain partition key or sort key attributes.
- * You need to specify those attributes in the KeyConditionExpression
.
- *
FilterExpression
cannot contain partition key
+ * or sort key attributes. You need to specify those attributes in the
+ * KeyConditionExpression
.
*
- * A Query
operation can return an empty result set and a LastEvaluatedKey
- * if all the items read for the page of results are filtered out.
- *
A Query
operation can return an empty result set and a
+ * LastEvaluatedKey
if all the items read for the page of results are
+ * filtered out.
You can query a table, a local secondary index, or a global secondary index. For a
* query on a table or on a local secondary index, you can set the
- * ConsistentRead
parameter to true
and obtain a
- * strongly consistent result. Global secondary indexes support eventually consistent reads
- * only, so do not specify ConsistentRead
when querying a global
- * secondary index.
ConsistentRead
parameter to true
and obtain a strongly
+ * consistent result. Global secondary indexes support eventually consistent reads only, so
+ * do not specify ConsistentRead
when querying a global secondary
+ * index.
*/
public query(args: QueryCommandInput, options?: __HttpHandlerOptions): PromiseCreates a new table from an existing backup. Any number of users can execute up to 4 concurrent restores - * (any type of restore) in a given account. - *
- *You can call RestoreTableFromBackup
at a maximum rate of 10 times per second.
You must manually set up the following on the restored table:
- *Creates a new table from an existing backup. Any number of users can execute up to 4 + * concurrent restores (any type of restore) in a given account.
+ *You can call RestoreTableFromBackup
at a maximum rate of 10 times per
+ * second.
You must manually set up the following on the restored table:
+ *Auto scaling policies
- *Auto scaling policies
+ * *IAM policies
- *IAM policies
+ * *Amazon CloudWatch metrics and alarms
- *Amazon CloudWatch metrics and alarms
+ * *Tags
- *Tags
+ * *Stream settings
- *Stream settings
+ * *Time to Live (TTL) settings
- *Time to Live (TTL) settings
+ * *Restores the specified table to the specified point in time within
- * EarliestRestorableDateTime
and LatestRestorableDateTime
.
- * You can restore your table to any point in time during the last 35 days.
- * Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.
- *
- * When you restore using point in time recovery, DynamoDB restores your table data to the state based on - * the selected date and time (day:hour:minute:second) to a new table. - *
- *- * Along with data, the following are also included on the new restored table using point in time recovery: - *
+ *EarliestRestorableDateTime
and LatestRestorableDateTime
.
+ * You can restore your table to any point in time during the last 35 days. Any number of
+ * users can execute up to 4 concurrent restores (any type of restore) in a given account.
+ * When you restore using point in time recovery, DynamoDB restores your table data to + * the state based on the selected date and time (day:hour:minute:second) to a new table.
+ *Along with data, the following are also included on the new restored table using + * point in time recovery:
*Global secondary indexes (GSIs)
- *Global secondary indexes (GSIs)
+ * *Local secondary indexes (LSIs)
- *Local secondary indexes (LSIs)
+ * *Provisioned read and write capacity
- *Provisioned read and write capacity
+ * *Encryption settings
- *- * All these settings come from the current settings of the source table at the time of restore. - *
- *Encryption settings
+ *All these settings come from the current settings of the source table at + * the time of restore.
+ *You must manually set up the following on the restored table:
- *You must manually set up the following on the restored table:
+ *Auto scaling policies
- *Auto scaling policies
+ * *IAM policies
- *IAM policies
+ * *Amazon CloudWatch metrics and alarms
- *Amazon CloudWatch metrics and alarms
+ * *Tags
- *Tags
+ * *Stream settings
- *Stream settings
+ * *Time to Live (TTL) settings
- *Time to Live (TTL) settings
+ * *Point in time recovery settings
- *Point in time recovery settings
+ * *The Scan
operation returns one or more items and item attributes by accessing every
- * item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression
operation.
If the total number of scanned items exceeds the maximum dataset size limit of 1 MB, the
- * scan stops and results are returned to the user as a LastEvaluatedKey
value
- * to continue the scan in a subsequent operation. The results also include the number of
- * items exceeding the limit. A scan can result in no table data meeting the filter
- * criteria.
A single Scan
operation reads up to the maximum number of items set (if
+ *
The Scan
operation returns one or more items and item attributes by
+ * accessing every item in a table or a secondary index. To have DynamoDB return fewer
+ * items, you can provide a FilterExpression
operation.
If the total number of scanned items exceeds the maximum dataset size limit of 1 MB,
+ * the scan stops and results are returned to the user as a LastEvaluatedKey
+ * value to continue the scan in a subsequent operation. The results also include the
+ * number of items exceeding the limit. A scan can result in no table data meeting the
+ * filter criteria.
A single Scan
operation reads up to the maximum number of items set (if
* using the Limit
parameter) or a maximum of 1 MB of data and then apply any
* filtering to the results using FilterExpression
. If
* LastEvaluatedKey
is present in the response, you need to paginate the
* result set. For more information, see Paginating the
* Results in the Amazon DynamoDB Developer Guide.
+ *
* Scan
operations proceed sequentially; however, for faster performance on
* a large table or secondary index, applications can request a parallel Scan
* operation by providing the Segment
and TotalSegments
* parameters. For more information, see Parallel
* Scan in the Amazon DynamoDB Developer Guide.
- * Scan
uses eventually consistent reads when accessing the data in a
- * table; therefore, the result set might not include the changes to data in the table
- * immediately before the operation began. If you need a consistent copy of the data, as of
- * the time that the Scan
begins, you can set the ConsistentRead
- * parameter to true
.
+ * Scan
uses eventually consistent reads when accessing the data in a table;
+ * therefore, the result set might not include the changes to data in the table immediately
+ * before the operation began. If you need a consistent copy of the data, as of the time
+ * that the Scan
begins, you can set the ConsistentRead
parameter
+ * to true
.
For an overview on tagging DynamoDB resources, see - * Tagging for DynamoDB - * in the Amazon DynamoDB Developer Guide.
+ *For an overview on tagging DynamoDB resources, see Tagging for DynamoDB + * in the Amazon DynamoDB Developer Guide.
*/ public tagResource(args: TagResourceCommandInput, options?: __HttpHandlerOptions): PromiseTransactGetItem
objects, each of which contains a Get
* structure that specifies an item to retrieve from a table in the account and Region. A
* call to TransactGetItems
cannot retrieve items from tables in more than one
- * AWS account or Region. The aggregate size of the items in the transaction cannot exceed
- * 4 MB.
- * DynamoDB rejects the entire TransactGetItems
request if any of the following is true:
DynamoDB rejects the entire TransactGetItems
request if any of
+ * the following is true:
A conflicting operation is in the process of updating an - * item to be read.
+ *A conflicting operation is in the process of updating an item to be + * read.
*There is insufficient provisioned capacity for the transaction - * to be completed.
+ *There is insufficient provisioned capacity for the transaction to be + * completed.
*There is a user error, such as an invalid data format.
+ *There is a user error, such as an invalid data format.
*The aggregate size of the items in the transaction cannot exceed 4 MB.
+ *The aggregate size of the items in the transaction cannot exceed 4 MB.
*
* TransactWriteItems
is a synchronous write operation that groups up to 25
* action requests. These actions can target items in different tables, but not in
- * different AWS accounts or Regions, and no two actions can target the same item. For
- * example, you cannot both ConditionCheck
and Update
the same
- * item. The aggregate size of the items in the transaction cannot exceed 4 MB.
ConditionCheck
and Update
+ * the same item. The aggregate size of the items in the transaction cannot exceed 4
+ * MB.
*
- * The actions are completed atomically so that either all of - * them succeed, or all of them fail. They are defined by the following objects:
+ *The actions are completed atomically so that either all of them succeed, or all of + * them fail. They are defined by the following objects:
* - *
- * Put
 —  Initiates a PutItem
operation to write a new
- * item. This structure specifies the primary key of the item to be written, the
- * name of the table to write it in, an optional condition expression that must be
- * satisfied for the write to succeed, a list of the item's attributes, and a field
- * indicating whether to retrieve the item's attributes if the condition is not
- * met.
- * Update
 —  Initiates an UpdateItem
operation to
- * update an existing item. This structure specifies the primary key of the item to
- * be updated, the name of the table where it resides, an optional condition
- * expression that must be satisfied for the update to succeed, an expression that
- * defines one or more attributes to be updated, and a field indicating whether to
- * retrieve the item's attributes if the condition is not met.
+ * Put
 —  Initiates a PutItem
+ * operation to write a new item. This structure specifies the primary key of the
+ * item to be written, the name of the table to write it in, an optional condition
+ * expression that must be satisfied for the write to succeed, a list of the item's
+ * attributes, and a field indicating whether to retrieve the item's attributes if
+ * the condition is not met.
- * Delete
 —  Initiates a DeleteItem
operation to
- * delete an existing item. This structure specifies the primary key of the item to
- * be deleted, the name of the table where it resides, an optional condition
- * expression that must be satisfied for the deletion to succeed, and a field
+ *
+ * Update
 —  Initiates an UpdateItem
+ * operation to update an existing item. This structure specifies the primary key
+ * of the item to be updated, the name of the table where it resides, an optional
+ * condition expression that must be satisfied for the update to succeed, an
+ * expression that defines one or more attributes to be updated, and a field
* indicating whether to retrieve the item's attributes if the condition is not
* met.
- * ConditionCheck
 —  Applies a condition to an item that is not
- * being modified by the transaction. This structure specifies the primary key of
- * the item to be checked, the name of the table where it resides, a condition
- * expression that must be satisfied for the transaction to succeed, and a field
- * indicating whether to retrieve the item's attributes if the condition is not
- * met.
+ * Delete
 —  Initiates a DeleteItem
+ * operation to delete an existing item. This structure specifies the primary key
+ * of the item to be deleted, the name of the table where it resides, an optional
+ * condition expression that must be satisfied for the deletion to succeed, and a
+ * field indicating whether to retrieve the item's attributes if the condition is
+ * not met.
+ * ConditionCheck
 —  Applies a condition to an item
+ * that is not being modified by the transaction. This structure specifies the
+ * primary key of the item to be checked, the name of the table where it resides, a
+ * condition expression that must be satisfied for the transaction to succeed, and
+ * a field indicating whether to retrieve the item's attributes if the condition is
+ * not met.
DynamoDB rejects the entire TransactWriteItems
request if any of the following is true:
DynamoDB rejects the entire TransactWriteItems
request if any of the
+ * following is true:
A condition in one of the condition expressions is not met.
+ *A condition in one of the condition expressions is not met.
*An ongoing operation is in the process of updating the same - * item.
+ *An ongoing operation is in the process of updating the same item.
*There is insufficient provisioned capacity for the transaction - * to be completed.
+ *There is insufficient provisioned capacity for the transaction to be + * completed.
*An item size becomes too large (bigger than 400 KB), a local secondary index (LSI) becomes too - * large, or a similar validation error occurs because of changes made by the - * transaction.
+ *An item size becomes too large (bigger than 400 KB), a local secondary index + * (LSI) becomes too large, or a similar validation error occurs because of changes + * made by the transaction.
*The aggregate size of the items in the transaction exceeds 4 MB.
+ *The aggregate size of the items in the transaction exceeds 4 MB.
*There is a user error, such as an invalid data format.
+ *There is a user error, such as an invalid data format.
*Removes the association of tags from an Amazon DynamoDB resource. You can call
* UntagResource
up to five times per second, per account.
For an overview on tagging DynamoDB resources, see - * Tagging for DynamoDB - * in the Amazon DynamoDB Developer Guide.
+ *For an overview on tagging DynamoDB resources, see Tagging for DynamoDB + * in the Amazon DynamoDB Developer Guide.
*/ public untagResource( args: UntagResourceCommandInput, @@ -2197,15 +2229,15 @@ export class DynamoDB extends DynamoDBClient { /** *
- * UpdateContinuousBackups
enables or disables point in time recovery for the specified table.
- * A successful UpdateContinuousBackups
call returns the current ContinuousBackupsDescription
.
- * Continuous backups are ENABLED
on all tables at table creation.
- * If point in time recovery is enabled, PointInTimeRecoveryStatus
will be set to ENABLED.
- * Once continuous backups and point in time recovery are enabled, you can restore to any point in time within
- * EarliestRestorableDateTime
and LatestRestorableDateTime
.
- *
+ * UpdateContinuousBackups
enables or disables point in time recovery for
+ * the specified table. A successful UpdateContinuousBackups
call returns the
+ * current ContinuousBackupsDescription
. Continuous backups are
+ * ENABLED
on all tables at table creation. If point in time recovery is
+ * enabled, PointInTimeRecoveryStatus
will be set to ENABLED.
Once continuous backups and point in time recovery are enabled, you can restore to
+ * any point in time within EarliestRestorableDateTime
and
+ * LatestRestorableDateTime
.
* LatestRestorableDateTime
is typically 5 minutes before the current time.
* You can restore your table to any point in time during the last 35 days.
Updates the status for contributor insights for a specific table or index.
+ *Updates the status for contributor insights for a specific table or index. CloudWatch + * Contributor Insights for DynamoDB graphs display the partition key and (if applicable) + * sort key of frequently accessed items and frequently throttled items in plaintext. If + * you require the use of AWS Key Management Service (KMS) to encrypt this table’s + * partition key and sort key data with an AWS managed key or customer managed key, you + * should not enable CloudWatch Contributor Insights for DynamoDB for this table.
*/ public updateContributorInsights( args: UpdateContributorInsightsCommandInput, @@ -2276,28 +2313,23 @@ export class DynamoDB extends DynamoDBClient { * same name as the global table, have the same key schema, have DynamoDB Streams enabled, * and have the same provisioned and maximum write capacity units. *Although you can use UpdateGlobalTable
to add replicas and remove replicas in
- * a single request, for simplicity we recommend that you issue separate requests for
- * adding or removing replicas.
- * If global secondary indexes are specified, then the following conditions must also be met: - *
+ *Although you can use UpdateGlobalTable
to add replicas and remove
+ * replicas in a single request, for simplicity we recommend that you issue separate
+ * requests for adding or removing replicas.
If global secondary indexes are specified, then the following conditions must also be + * met:
*- * The global secondary indexes must have the same name. - *
+ *The global secondary indexes must have the same name.
*- * The global secondary indexes must have the same hash key and sort key (if present). - *
+ *The global secondary indexes must have the same hash key and sort key (if + * present).
*- * The global secondary indexes must have the same provisioned and maximum write capacity units. - *
+ *The global secondary indexes must have the same provisioned and maximum write + * capacity units.
*Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values).
- *You can also return the item's attribute values in the same UpdateItem
- * operation using the ReturnValues
parameter.
Edits an existing item's attributes, or adds a new item to the table if it does not + * already exist. You can put, delete, or add attribute values. You can also perform a + * conditional update on an existing item (insert a new attribute name-value pair if it + * doesn't exist, or replace an existing name-value pair if it has certain expected + * attribute values).
+ *You can also return the item's attribute values in the same UpdateItem
+ * operation using the ReturnValues
parameter.
Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.
- *You can only perform one of the following operations at once:
- *Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB + * Streams settings for a given table.
+ *You can only perform one of the following operations at once:
+ *Modify the provisioned throughput settings of the table.
+ *Modify the provisioned throughput settings of the table.
*Enable or disable DynamoDB Streams on the table.
+ *Enable or disable DynamoDB Streams on the table.
*Remove a global secondary index from the table.
+ *Remove a global secondary index from the table.
*Create a new global secondary index on the table. After the index begins + *
Create a new global secondary index on the table. After the index begins
* backfilling, you can use UpdateTable
to perform other
* operations.
- * UpdateTable
is an asynchronous operation; while it is executing, the table status
- * changes from ACTIVE
to UPDATING
. While it is UPDATING
,
- * you cannot issue another UpdateTable
request. When the table returns to the
- * ACTIVE
state, the UpdateTable
operation is complete.
+ * UpdateTable
is an asynchronous operation; while it is executing, the table
+ * status changes from ACTIVE
to UPDATING
. While it is
+ * UPDATING
, you cannot issue another UpdateTable
request.
+ * When the table returns to the ACTIVE
state, the UpdateTable
+ * operation is complete.
Updates auto scaling settings on your global tables at once.
- *This operation only applies to Version 2019.11.21 of global tables.
- *This operation only applies to Version + * 2019.11.21 of global tables.
+ *The UpdateTimeToLive
method enables or disables Time to Live (TTL) for the
- * specified table. A successful UpdateTimeToLive
call returns the current
+ *
The UpdateTimeToLive
method enables or disables Time to Live (TTL) for
+ * the specified table. A successful UpdateTimeToLive
call returns the current
* TimeToLiveSpecification
. It can take up to one hour for the change to
* fully process. Any additional UpdateTimeToLive
calls for the same table
* during this one hour duration result in a ValidationException
.
TTL compares the current time in epoch time format to the time stored in the TTL + *
TTL compares the current time in epoch time format to the time stored in the TTL * attribute of an item. If the epoch time value stored in the attribute is less than the * current time, the item is marked as expired and subsequently deleted.
*The epoch time format is the number of seconds elapsed since 12:00:00 AM January 1, - * 1970 UTC.
+ *The epoch time format is the number of seconds elapsed since 12:00:00 AM January + * 1, 1970 UTC.
*DynamoDB deletes expired items on a best-effort basis to ensure availability of - * throughput for other data operations. - *
- *DynamoDB typically deletes expired items within two days of expiration. The exact duration - * within which an item gets deleted after expiration is specific to the - * nature of the workload. Items that have expired and not been deleted will still show up in reads, - * queries, and scans.
- *DynamoDB typically deletes expired items within two days of expiration. The exact + * duration within which an item gets deleted after expiration is specific to the + * nature of the workload. Items that have expired and not been deleted will still show + * up in reads, queries, and scans.
+ *As items are deleted, they are removed from any local secondary index and global * secondary index immediately in the same eventually consistent way as a standard delete * operation.
- *For more information, see Time To Live in the Amazon DynamoDB Developer Guide.
+ *For more information, see Time To Live in the + * Amazon DynamoDB Developer Guide.
*/ public updateTimeToLive( args: UpdateTimeToLiveCommandInput, diff --git a/clients/client-dynamodb/src/DynamoDBClient.ts b/clients/client-dynamodb/src/DynamoDBClient.ts index fed0668aaafcb..70b6a1d3e5028 100644 --- a/clients/client-dynamodb/src/DynamoDBClient.ts +++ b/clients/client-dynamodb/src/DynamoDBClient.ts @@ -425,23 +425,24 @@ export interface DynamoDBClientResolvedConfig extends DynamoDBClientResolvedConf /** *Amazon DynamoDB is a fully managed NoSQL database service that provides fast + * and predictable performance with seamless scalability. DynamoDB lets you + * offload the administrative burdens of operating and scaling a distributed database, so + * that you don't have to worry about hardware provisioning, setup and configuration, + * replication, software patching, or cluster scaling.
* - *Amazon DynamoDB is a fully managed NoSQL database service that provides fast and - * predictable performance with seamless scalability. DynamoDB lets you offload the - * administrative burdens of operating and scaling a distributed database, so that you don't have - * to worry about hardware provisioning, setup and configuration, replication, software patching, - * or cluster scaling.
+ *With DynamoDB, you can create database tables that can store and retrieve + * any amount of data, and serve any level of request traffic. You can scale up or scale + * down your tables' throughput capacity without downtime or performance degradation, and + * use the Amazon Web Services Management Console to monitor resource utilization and performance + * metrics.
* - *With DynamoDB, you can create database tables that can store and retrieve any amount of - * data, and serve any level of request traffic. You can scale up or scale down your tables' - * throughput capacity without downtime or performance degradation, and use the AWS Management - * Console to monitor resource utilization and performance metrics.
- * - *DynamoDB automatically spreads the data and traffic for your tables over a sufficient - * number of servers to handle your throughput and storage requirements, while maintaining - * consistent and fast performance. All of your data is stored on solid state disks (SSDs) and - * automatically replicated across multiple Availability Zones in an AWS region, providing - * built-in high availability and data durability.
+ *DynamoDB automatically spreads the data and traffic for your tables over + * a sufficient number of servers to handle your throughput and storage requirements, while + * maintaining consistent and fast performance. All of your data is stored on solid state + * disks (SSDs) and automatically replicated across multiple Availability Zones in an + * Amazon Web Services Region, providing built-in high availability and data + * durability.
*/ export class DynamoDBClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-dynamodb/src/commands/BatchExecuteStatementCommand.ts b/clients/client-dynamodb/src/commands/BatchExecuteStatementCommand.ts index 95261d8a492cf..6d4cdf3f06242 100644 --- a/clients/client-dynamodb/src/commands/BatchExecuteStatementCommand.ts +++ b/clients/client-dynamodb/src/commands/BatchExecuteStatementCommand.ts @@ -22,9 +22,12 @@ export interface BatchExecuteStatementCommandInput extends BatchExecuteStatement export interface BatchExecuteStatementCommandOutput extends BatchExecuteStatementOutput, __MetadataBearer {} /** - *- * This operation allows you to perform batch reads and writes on data stored in DynamoDB, using PartiQL. - *
+ *This operation allows you to perform batch reads or writes on data stored in DynamoDB, + * using PartiQL.
+ *The entire batch must consist of either read statements or write statements, you + * cannot mix both in one batch.
+ *The BatchGetItem
operation returns the attributes of one or more items from one or
- * more tables. You identify requested items by primary key.
A single operation can retrieve up to 16 MB of data, which can contain as many as 100 + *
The BatchGetItem
operation returns the attributes of one or more items
+ * from one or more tables. You identify requested items by primary key.
A single operation can retrieve up to 16 MB of data, which can contain as many as 100
* items. BatchGetItem
returns a partial result if the response size limit is
* exceeded, the table's provisioned throughput is exceeded, or an internal processing
* failure occurs. If a partial result is returned, the operation returns a value for
* UnprocessedKeys
. You can use this value to retry the operation starting
* with the next item to get.
If you request more than 100 items, BatchGetItem
returns a
* ValidationException
with the message "Too many items requested for
* the BatchGetItem call."
For example, if you ask to retrieve 100 items, but each individual item is 300 KB in + *
For example, if you ask to retrieve 100 items, but each individual item is 300 KB in
* size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns
* an appropriate UnprocessedKeys
value so you can get the next page of
* results. If desired, your application can include its own logic to assemble the pages of
* results into one dataset.
If none of the items can be processed due to insufficient + *
If none of the items can be processed due to insufficient
* provisioned throughput on all of the tables in the request, then
* BatchGetItem
returns a
* ProvisionedThroughputExceededException
. If at least
* one of the items is successfully processed, then
* BatchGetItem
completes successfully, while returning the keys of the
* unread items in UnprocessedKeys
.
If DynamoDB returns any unprocessed items, you should retry the batch operation on those - * items. However, we strongly recommend that you use an exponential backoff algorithm. - * If you retry the batch operation immediately, the underlying read or write requests can - * still fail due to throttling on the individual tables. If you delay the batch operation - * using exponential backoff, the individual requests in the batch are much more likely to - * succeed.
- *For more information, see Batch - * Operations and Error Handling in the Amazon DynamoDB Developer Guide.
- *By default, BatchGetItem
performs eventually consistent reads on every table in the
- * request. If you want strongly consistent reads instead, you can set ConsistentRead
to
- * true
for any or all tables.
In order to minimize response latency, BatchGetItem
retrieves items in parallel.
When designing your application, keep in mind that DynamoDB does not return items in any
- * particular order. To help parse the response by item, include the primary key values for the
- * items in your request in the ProjectionExpression
parameter.
If a requested item does not exist, it is not returned in the result. Requests for
+ * If DynamoDB returns any unprocessed items, you should retry the batch operation on
+ * those items. However, we strongly recommend that you use an exponential
+ * backoff algorithm. If you retry the batch operation immediately, the
+ * underlying read or write requests can still fail due to throttling on the individual
+ * tables. If you delay the batch operation using exponential backoff, the individual
+ * requests in the batch are much more likely to succeed. For more information, see Batch Operations and Error Handling in the Amazon DynamoDB
+ * Developer Guide.
By default, BatchGetItem
performs eventually consistent reads on every
+ * table in the request. If you want strongly consistent reads instead, you can set
+ * ConsistentRead
to true
for any or all tables.
In order to minimize response latency, BatchGetItem
retrieves items in
+ * parallel.
When designing your application, keep in mind that DynamoDB does not return items in
+ * any particular order. To help parse the response by item, include the primary key values
+ * for the items in your request in the ProjectionExpression
parameter.
If a requested item does not exist, it is not returned in the result. Requests for * nonexistent items consume the minimum read capacity units according to the type of read. * For more information, see Working with Tables in the Amazon DynamoDB Developer * Guide.
diff --git a/clients/client-dynamodb/src/commands/BatchWriteItemCommand.ts b/clients/client-dynamodb/src/commands/BatchWriteItemCommand.ts index 198620b08302b..56c366dfc7be6 100644 --- a/clients/client-dynamodb/src/commands/BatchWriteItemCommand.ts +++ b/clients/client-dynamodb/src/commands/BatchWriteItemCommand.ts @@ -22,79 +22,83 @@ export interface BatchWriteItemCommandInput extends BatchWriteItemInput {} export interface BatchWriteItemCommandOutput extends BatchWriteItemOutput, __MetadataBearer {} /** - *The BatchWriteItem
operation puts or deletes multiple items in one or more
- * tables. A single call to BatchWriteItem
can write up to 16 MB of data,
+ *
The BatchWriteItem
operation puts or deletes multiple items in one or
+ * more tables. A single call to BatchWriteItem
can write up to 16 MB of data,
* which can comprise as many as 25 put or delete requests. Individual items to be written
* can be as large as 400 KB.
- * BatchWriteItem
cannot update items. To update items, use the UpdateItem
- * action.
The individual PutItem
and DeleteItem
operations specified in
- * BatchWriteItem
are atomic; however BatchWriteItem
as a whole is not. If any
- * requested operations fail because the table's provisioned throughput is exceeded or an
- * internal processing failure occurs, the failed operations are returned in the
- * UnprocessedItems
response parameter. You can investigate and optionally resend the
- * requests. Typically, you would call BatchWriteItem
in a loop. Each iteration would
- * check for unprocessed items and submit a new BatchWriteItem
request with those
- * unprocessed items until all items have been processed.
If none of the items can be processed due to insufficient
+ * BatchWriteItem
cannot update items. To update items, use the
+ * UpdateItem
action.
The individual PutItem
and DeleteItem
operations specified
+ * in BatchWriteItem
are atomic; however BatchWriteItem
as a
+ * whole is not. If any requested operations fail because the table's provisioned
+ * throughput is exceeded or an internal processing failure occurs, the failed operations
+ * are returned in the UnprocessedItems
response parameter. You can
+ * investigate and optionally resend the requests. Typically, you would call
+ * BatchWriteItem
in a loop. Each iteration would check for unprocessed
+ * items and submit a new BatchWriteItem
request with those unprocessed items
+ * until all items have been processed.
If none of the items can be processed due to insufficient
* provisioned throughput on all of the tables in the request, then
* BatchWriteItem
returns a
* ProvisionedThroughputExceededException
.
If DynamoDB returns any unprocessed items, you should retry the batch operation on those - * items. However, we strongly recommend that you use an exponential backoff algorithm. - * If you retry the batch operation immediately, the underlying read or write requests can - * still fail due to throttling on the individual tables. If you delay the batch operation - * using exponential backoff, the individual requests in the batch are much more likely to - * succeed.
+ *If DynamoDB returns any unprocessed items, you should retry the batch operation on + * those items. However, we strongly recommend that you use an exponential + * backoff algorithm. If you retry the batch operation immediately, the + * underlying read or write requests can still fail due to throttling on the individual + * tables. If you delay the batch operation using exponential backoff, the individual + * requests in the batch are much more likely to succeed.
*For more information, see Batch Operations and Error Handling in the Amazon DynamoDB * Developer Guide.
- *With BatchWriteItem
, you can efficiently write or delete large amounts of
+ *
With BatchWriteItem
, you can efficiently write or delete large amounts of
* data, such as from Amazon EMR, or copy data from another database into DynamoDB. In
* order to improve performance with these large-scale operations,
* BatchWriteItem
does not behave in the same way as individual
* PutItem
and DeleteItem
calls would. For example, you
* cannot specify conditions on individual put and delete requests, and
* BatchWriteItem
does not return deleted items in the response.
If you use a programming language that supports concurrency, you can use
- * threads to write items in parallel. Your application must include the necessary logic to
- * manage the threads. With languages that don't support threading, you must update
- * or delete the specified items one at a time. In both situations, BatchWriteItem
- * performs the specified put and delete operations in
- * parallel, giving you the power of the thread pool approach without having to introduce
- * complexity into your application.
Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit.
- *If one or more of the following is true, DynamoDB rejects the entire batch write operation:
- *If you use a programming language that supports concurrency, you can use threads to
+ * write items in parallel. Your application must include the necessary logic to manage the
+ * threads. With languages that don't support threading, you must update or delete the
+ * specified items one at a time. In both situations, BatchWriteItem
performs
+ * the specified put and delete operations in parallel, giving you the power of the thread
+ * pool approach without having to introduce complexity into your application.
Parallel processing reduces latency, but each specified put and delete request + * consumes the same number of write capacity units whether it is processed in parallel or + * not. Delete operations on nonexistent items consume one write capacity unit.
+ *If one or more of the following is true, DynamoDB rejects the entire batch write + * operation:
+ *One or more tables specified in the BatchWriteItem
request does not exist.
One or more tables specified in the BatchWriteItem
request does
+ * not exist.
Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema.
+ *Primary key attributes specified on an item in the request do not match those + * in the corresponding table's primary key schema.
*You try to perform multiple operations on the same item in the same BatchWriteItem
- * request. For example, you cannot put and delete the same item in the same
- * BatchWriteItem
request.
You try to perform multiple operations on the same item in the same
+ * BatchWriteItem
request. For example, you cannot put and delete
+ * the same item in the same BatchWriteItem
request.
- * Your request contains at least two items with identical hash and range keys (which essentially is two put operations). - *
- *Your request contains at least two items with identical hash and range keys + * (which essentially is two put operations).
+ * *There are more than 25 requests in the batch.
+ *There are more than 25 requests in the batch.
*Any individual item in a batch exceeds 400 KB.
+ *Any individual item in a batch exceeds 400 KB.
*The total request size exceeds 16 MB.
+ *The total request size exceeds 16 MB.
+ * Creates a backup for an existing table.
+ * Each time you create an on-demand backup, the entire table data is backed up. There
+ * is no limit to the number of on-demand backups that can be taken.
+ * When you create an on-demand backup, a time marker of the request is cataloged, and
+ * the backup is created asynchronously, by applying all changes until the time of the
+ * request to the last full table snapshot. Backup requests are processed instantaneously
+ * and become available for restore within minutes.
+ * You can call CreateBackup at a maximum rate of 50 times per second.
+ * All backups in DynamoDB work without consuming any provisioned throughput on the
+ * table.
+ * If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed to
+ * contain all data committed to the table up to 14:24:00, and data committed after
+ * 14:26:00 will not be. The backup might contain data modifications made between 14:24:00
+ * and 14:26:00. On-demand backup does not support causal consistency.
+ * Along with data, the following are also included on the backups:
+ *   - Global secondary indexes (GSIs)
+ *   - Local secondary indexes (LSIs)
+ *   - Streams
+ *   - Provisioned read and write capacity
+ * Creates a global table from an existing table. A global table creates a replication
+ * relationship between two or more DynamoDB tables with the same table name in the
+ * provided Regions.
+ * This operation only applies to Version 2017.11.29 of global tables.
+ * If you want to add a new replica table to a global table, each of the following
+ * conditions must be true:
+ *   - The table must have the same primary key as all of the other replicas.
+ *   - The table must have the same name as all of the other replicas.
+ *   - The table must have DynamoDB Streams enabled, with the stream containing both
+ *     the new and the old images of the item.
+ *   - None of the replica tables in the global table can contain any data.
+ * If global secondary indexes are specified, then the following conditions must also be
+ * met:
+ *   - The global secondary indexes must have the same name.
+ *   - The global secondary indexes must have the same hash key and sort key (if
+ *     present).
+ * If local secondary indexes are specified, then the following conditions must also be
+ * met:
+ *   - The local secondary indexes must have the same name.
+ *   - The local secondary indexes must have the same hash key and sort key (if
+ *     present).
+ * Write capacity settings should be set consistently across your replica tables and
+ * secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the
+ * write capacity settings for all of your global tables replicas and indexes.
+ * If you prefer to manage write capacity settings manually, you should provision
+ * equal replicated write capacity units to your replica tables. You should also
+ * provision equal replicated write capacity units to matching secondary indexes across
+ * your global table.
- * The CreateTable operation adds a new table to your account. In an AWS
- * account, table names must be unique within each Region. That is, you can have two tables
- * with same name if you create the tables in different Regions.
+ * The CreateTable operation adds a new table to your account. In an Amazon Web Services
+ * account, table names must be unique within each Region. That is, you can have two
+ * tables with same name if you create the tables in different Regions.
+ * CreateTable is an asynchronous operation. Upon receiving a CreateTable request,
+ * DynamoDB immediately returns a response with a TableStatus of CREATING. After the
+ * table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform read
+ * and write operations only on an ACTIVE table.
+ * You can optionally define secondary indexes on the new table, as part of the
+ * CreateTable operation. If you want to create multiple tables with secondary indexes
+ * on them, you must create the tables sequentially. Only one table with secondary
+ * indexes can be in the CREATING state at any given time.
+ * You can use the DescribeTable action to check the table status.
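Editor's note: a minimal sketch of the asynchronous create-then-wait flow described above. The schema is hypothetical; `waitUntilTableExists` is the v3 waiter exported by `@aws-sdk/client-dynamodb`.

```ts
import { CreateTableCommand, DynamoDBClient, waitUntilTableExists } from "@aws-sdk/client-dynamodb";

const client = new DynamoDBClient({});

// Hypothetical "Music" table with a composite primary key.
await client.send(
  new CreateTableCommand({
    TableName: "Music",
    AttributeDefinitions: [
      { AttributeName: "Artist", AttributeType: "S" },
      { AttributeName: "SongTitle", AttributeType: "S" },
    ],
    KeySchema: [
      { AttributeName: "Artist", KeyType: "HASH" },
      { AttributeName: "SongTitle", KeyType: "RANGE" },
    ],
    BillingMode: "PAY_PER_REQUEST",
  })
);

// The table starts in CREATING; poll until it is ACTIVE before reading or writing.
await waitUntilTableExists({ client, maxWaitTime: 300 }, { TableName: "Music" });
```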
+ * Deletes an existing backup of a table.
+ * You can call DeleteBackup at a maximum rate of 10 times per second.
+ * Deletes a single item in a table by primary key. You can perform a conditional delete
+ * operation that deletes the item if it exists, or if it has an expected attribute
+ * value.
+ * In addition to deleting an item, you can also return the item's attribute values in
+ * the same operation, using the ReturnValues parameter.
+ * Unless you specify conditions, the DeleteItem is an idempotent operation; running it
+ * multiple times on the same item or attribute does not result in an error response.
+ * Conditional deletes are useful for deleting items only if specific conditions are met.
+ * If those conditions are met, DynamoDB performs the delete. Otherwise, the item is not
+ * deleted.
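Editor's note: a sketch of a conditional delete with `ReturnValues`, as described above. Table, key, and condition are hypothetical.

```ts
import { DeleteItemCommand, DynamoDBClient } from "@aws-sdk/client-dynamodb";

const client = new DynamoDBClient({});

// Deletes only if the condition holds; otherwise the service throws
// a ConditionalCheckFailedException and the item is left in place.
const out = await client.send(
  new DeleteItemCommand({
    TableName: "Music",
    Key: { Artist: { S: "Acme Band" }, SongTitle: { S: "Happy Day" } },
    ConditionExpression: "Plays <= :low",
    ExpressionAttributeValues: { ":low": { N: "10" } },
    ReturnValues: "ALL_OLD", // return the deleted item's attributes
  })
);
console.log(out.Attributes);
```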
 * @example
 * Use a bare-bones client and the command you need to make an API call.
 * ```javascript
diff --git a/clients/client-dynamodb/src/commands/DeleteTableCommand.ts b/clients/client-dynamodb/src/commands/DeleteTableCommand.ts
index c614511e78de1..6c03706cb0089 100644
--- a/clients/client-dynamodb/src/commands/DeleteTableCommand.ts
+++ b/clients/client-dynamodb/src/commands/DeleteTableCommand.ts
@@ -23,22 +23,24 @@ export interface DeleteTableCommandOutput extends DeleteTableOutput, __MetadataB
 /**
+ * The DeleteTable operation deletes a table and all of its items. After a
+ * DeleteTable request, the specified table is in the DELETING state until DynamoDB
+ * completes the deletion. If the table is in the ACTIVE state, you can delete it. If a
+ * table is in CREATING or UPDATING states, then DynamoDB returns a
+ * ResourceInUseException. If the specified table does not exist, DynamoDB returns a
+ * ResourceNotFoundException. If table is already in the DELETING state, no error is
+ * returned.
+ * DynamoDB might continue to accept data read and write operations, such as GetItem
+ * and PutItem, on a table in the DELETING state until the table deletion is complete.
+ * When you delete a table, any indexes on that table are also deleted.
+ * If you have DynamoDB Streams enabled on the table, then the corresponding stream on
+ * that table goes into the DISABLED state, and the stream is automatically deleted
+ * after 24 hours.
+ * Use the DescribeTable action to check the status of the table.
+ * Describes an existing backup of a table.
+ * You can call DescribeBackup at a maximum rate of 10 times per second.
+ * Checks the status of continuous backups and point in time recovery on the specified
+ * table. Continuous backups are ENABLED on all tables at table creation. If point in
+ * time recovery is enabled, PointInTimeRecoveryStatus will be set to ENABLED.
+ * After continuous backups and point in time recovery are enabled, you can restore to
+ * any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.
+ * LatestRestorableDateTime is typically 5 minutes before the current time. You can
+ * restore your table to any point in time during the last 35 days.
+ * You can call DescribeContinuousBackups at a maximum rate of 10 times per second.
+ * Returns information about contributor insights, for a given table or global secondary
+ * index.
 * @example
 * Use a bare-bones client and the command you need to make an API call.
 * ```javascript
diff --git a/clients/client-dynamodb/src/commands/DescribeGlobalTableCommand.ts b/clients/client-dynamodb/src/commands/DescribeGlobalTableCommand.ts
index d0c3956e55470..322d66bb2839e 100644
--- a/clients/client-dynamodb/src/commands/DescribeGlobalTableCommand.ts
+++ b/clients/client-dynamodb/src/commands/DescribeGlobalTableCommand.ts
@@ -23,10 +23,11 @@ export interface DescribeGlobalTableCommandOutput extends DescribeGlobalTableOut
 /**
 * Returns information about the specified global table.
+ * This operation only applies to Version 2017.11.29 of global tables. If you are
+ * using global tables Version 2019.11.21 you can use DescribeTable instead.
+ * Describes Region-specific settings for a global table.
+ * This operation only applies to Version 2017.11.29 of global tables.
- * Returns the current provisioned-capacity quotas for your AWS account in a Region, both
- * for the Region as a whole and for any one DynamoDB table that you create there.
+ * Returns the current provisioned-capacity quotas for your Amazon Web Services account
+ * in a Region, both for the Region as a whole and for any one DynamoDB table that you
+ * create there.
- * When you establish an AWS account, the account has initial quotas on the maximum read
- * capacity units and write capacity units that you can provision across all of your
- * DynamoDB tables in a given Region.
+ * When you establish an Amazon Web Services account, the account has initial quotas on
+ * the maximum read capacity units and write capacity units that you can provision across
+ * all of your DynamoDB tables in a given Region. Also, there are per-table quotas that
+ * apply when you create a table there. For more information, see Service, Account, and
+ * Table Quotas page in the Amazon DynamoDB Developer Guide.
- * Although you can increase these quotas by filing a case at AWS Support Center,
- * obtaining the increase is not instantaneous. The DescribeLimits action lets you write
- * code to compare the capacity you are currently using to those quotas imposed by your
- * account so that you have enough time to apply for an increase before you hit a quota.
+ * Although you can increase these quotas by filing a case at Amazon Web Services Support
+ * Center, obtaining the increase is not instantaneous. The DescribeLimits action lets
+ * you write code to compare the capacity you are currently using to those quotas imposed
+ * by your account so that you have enough time to apply for an increase before you hit a
+ * quota.
- * For example, you could use one of the AWS SDKs to do the following:
+ * For example, you could use one of the Amazon Web Services SDKs to do the following:
+ *   1. Call DescribeLimits for a particular Region to obtain your current account
+ *      quotas on provisioned capacity there.
+ *   2. Create a variable to hold the aggregate read capacity units provisioned for all
+ *      your tables in that Region, and one to hold the aggregate write capacity units.
+ *      Zero them both.
+ *   3. Call ListTables to obtain a list of all your DynamoDB tables.
+ *   4. For each table name listed by ListTables, do the following:
+ *        - Call DescribeTable with the table name.
+ *        - Use the data returned by DescribeTable to add the read capacity units and
+ *          write capacity units provisioned for the table itself to your variables.
+ *        - If the table has one or more global secondary indexes (GSIs), loop over
+ *          these GSIs and add their provisioned capacity values to your variables as
+ *          well.
+ *   5. Report the account quotas for that Region returned by DescribeLimits, along
+ *      with the total current provisioned capacity levels you have calculated.
+ * This will let you see whether you are getting close to your account-level quotas.
+ * The per-table quotas apply only when you are creating a new table. They restrict the
+ * sum of the provisioned capacity of the new table itself and all its global secondary
+ * indexes.
+ * For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned
+ * capacity extremely rapidly, but the only quota that applies is that the aggregate
+ * provisioned capacity over all your tables and GSIs cannot exceed either of the
+ * per-account quotas.
+ * DescribeLimits should only be called periodically. You can expect throttling errors
+ * if you call it more than once in a minute.
+ * The DescribeLimits Request element has no content.
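Editor's note: a sketch of the five-step aggregation recipe above in TypeScript. It is an illustration under stated assumptions, not part of this diff; all response fields used here are from the documented DescribeLimits/DescribeTable shapes.

```ts
import {
  DescribeLimitsCommand,
  DescribeTableCommand,
  DynamoDBClient,
  ListTablesCommand,
} from "@aws-sdk/client-dynamodb";

const client = new DynamoDBClient({});
const limits = await client.send(new DescribeLimitsCommand({}));

let readUnits = 0;
let writeUnits = 0;
let start: string | undefined;
do {
  // ListTables pages with LastEvaluatedTableName / ExclusiveStartTableName.
  const page = await client.send(new ListTablesCommand({ ExclusiveStartTableName: start }));
  for (const name of page.TableNames ?? []) {
    const { Table } = await client.send(new DescribeTableCommand({ TableName: name }));
    readUnits += Table?.ProvisionedThroughput?.ReadCapacityUnits ?? 0;
    writeUnits += Table?.ProvisionedThroughput?.WriteCapacityUnits ?? 0;
    for (const gsi of Table?.GlobalSecondaryIndexes ?? []) {
      readUnits += gsi.ProvisionedThroughput?.ReadCapacityUnits ?? 0;
      writeUnits += gsi.ProvisionedThroughput?.WriteCapacityUnits ?? 0;
    }
  }
  start = page.LastEvaluatedTableName;
} while (start);

console.log(
  `Using ${readUnits}/${limits.AccountMaxReadCapacityUnits} RCU, ` +
    `${writeUnits}/${limits.AccountMaxWriteCapacityUnits} WCU`
);
```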
+ * Returns information about the table, including the current status of the table, when
+ * it was created, the primary key schema, and any indexes on the table.
+ * If you issue a DescribeTable request immediately after a CreateTable request,
+ * DynamoDB might return a ResourceNotFoundException. This is because DescribeTable
+ * uses an eventually consistent query, and the metadata for your table might not be
+ * available at that moment. Wait for a few seconds, and then try the DescribeTable
+ * request again.
+ * Describes auto scaling settings across replicas of the global table at once.
+ * This operation only applies to Version 2019.11.21 of global tables.
+ * Starts table data replication to the specified Kinesis data stream at a timestamp
+ * chosen during the enable workflow. If this operation doesn't return results
+ * immediately, use DescribeKinesisStreamingDestination to check if streaming to the
+ * Kinesis data stream is ACTIVE.
 * @example
 * Use a bare-bones client and the command you need to make an API call.
 * ```javascript
diff --git a/clients/client-dynamodb/src/commands/ExecuteStatementCommand.ts b/clients/client-dynamodb/src/commands/ExecuteStatementCommand.ts
index aa91eebf81909..3dc32baff462f 100644
--- a/clients/client-dynamodb/src/commands/ExecuteStatementCommand.ts
+++ b/clients/client-dynamodb/src/commands/ExecuteStatementCommand.ts
@@ -22,9 +22,8 @@ export interface ExecuteStatementCommandInput extends ExecuteStatementInput {}
 export interface ExecuteStatementCommandOutput extends ExecuteStatementOutput, __MetadataBearer {}
 /**
- * This operation allows you to perform reads and singleton writes on data stored in DynamoDB, using PartiQL.
+ * This operation allows you to perform reads and singleton writes on data stored in
+ * DynamoDB, using PartiQL.
 * @example
 * Use a bare-bones client and the command you need to make an API call.
 * ```javascript
diff --git a/clients/client-dynamodb/src/commands/ExecuteTransactionCommand.ts b/clients/client-dynamodb/src/commands/ExecuteTransactionCommand.ts
index cce30f0454f14..73a67c6f04e1a 100644
--- a/clients/client-dynamodb/src/commands/ExecuteTransactionCommand.ts
+++ b/clients/client-dynamodb/src/commands/ExecuteTransactionCommand.ts
@@ -22,9 +22,14 @@ export interface ExecuteTransactionCommandInput extends ExecuteTransactionInput
 export interface ExecuteTransactionCommandOutput extends ExecuteTransactionOutput, __MetadataBearer {}
 /**
- * This operation allows you to perform transactional reads or writes on data stored in DynamoDB, using PartiQL.
+ * This operation allows you to perform transactional reads or writes on data stored in
+ * DynamoDB, using PartiQL.
+ * The entire transaction must consist of either read statements or write statements,
+ * you cannot mix both in one transaction. The EXISTS function is an exception and can
+ * be used to check the condition of specific attributes of the item in a similar
+ * manner to ConditionCheck in the TransactWriteItems API.
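Editor's note: a sketch of an all-write PartiQL transaction per the rule above (no mixing of reads and writes). The `Music` table and statements are hypothetical.

```ts
import { DynamoDBClient, ExecuteTransactionCommand } from "@aws-sdk/client-dynamodb";

const client = new DynamoDBClient({});

// Both statements are writes; adding a SELECT here would be rejected.
await client.send(
  new ExecuteTransactionCommand({
    TransactStatements: [
      {
        Statement: `INSERT INTO "Music" VALUE {'Artist': ?, 'SongTitle': ?}`,
        Parameters: [{ S: "Acme Band" }, { S: "New Single" }],
      },
      {
        Statement: `UPDATE "Music" SET Plays = ? WHERE Artist = ? AND SongTitle = ?`,
        Parameters: [{ N: "0" }, { S: "Acme Band" }, { S: "Happy Day" }],
      },
    ],
  })
);
```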
+ * The GetItem operation returns a set of attributes for the item with the given
+ * primary key. If there is no matching item, GetItem does not return any data and
+ * there will be no Item element in the response.
+ * GetItem provides an eventually consistent read by default. If your application
+ * requires a strongly consistent read, set ConsistentRead to true. Although a
+ * strongly consistent read might take more time than an eventually consistent read, it
+ * always returns the last updated value.
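Editor's note: a sketch of the strongly consistent read described above; names are hypothetical.

```ts
import { DynamoDBClient, GetItemCommand } from "@aws-sdk/client-dynamodb";

const client = new DynamoDBClient({});

const { Item } = await client.send(
  new GetItemCommand({
    TableName: "Music",
    Key: { Artist: { S: "Acme Band" }, SongTitle: { S: "Happy Day" } },
    ConsistentRead: true, // omit for the eventually consistent default
    ProjectionExpression: "Artist, SongTitle, AlbumTitle",
  })
);
// With no matching item, Item is simply absent from the response.
if (Item) console.log(Item.AlbumTitle?.S);
```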
- * List backups associated with an AWS account. To list backups for a given table,
- * specify TableName. ListBackups returns a paginated list of results with at most
- * 1 MB worth of items in a page. You can also specify a maximum number of entries to be
- * returned in a page.
+ * List backups associated with an Amazon Web Services account. To list backups for a
+ * given table, specify TableName. ListBackups returns a paginated list of results
+ * with at most 1 MB worth of items in a page. You can also specify a maximum number of
+ * entries to be returned in a page.
+ * In the request, start time is inclusive, but end time is exclusive. Note that these
+ * boundaries are for the time at which the original backup was requested.
+ * You can call ListBackups a maximum of five times per second.
+ * Returns a list of ContributorInsightsSummary for a table and all its global secondary
+ * indexes.
 * @example
 * Use a bare-bones client and the command you need to make an API call.
 * ```javascript
diff --git a/clients/client-dynamodb/src/commands/ListGlobalTablesCommand.ts b/clients/client-dynamodb/src/commands/ListGlobalTablesCommand.ts
index 7705692fc5fc7..4256d191fbbe6 100644
--- a/clients/client-dynamodb/src/commands/ListGlobalTablesCommand.ts
+++ b/clients/client-dynamodb/src/commands/ListGlobalTablesCommand.ts
@@ -23,9 +23,10 @@ export interface ListGlobalTablesCommandOutput extends ListGlobalTablesOutput, _
 /**
 * Lists all global tables that have a replica in the specified Region.
+ * This operation only applies to Version 2017.11.29 of global tables.
+ * Returns an array of table names associated with the current account and endpoint. The
+ * output from ListTables is paginated, with each page returning a maximum of 100 table
+ * names.
+ * List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource up to 10
+ * times per second, per account.
+ * For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the
+ * Amazon DynamoDB Developer Guide.
 * @example
 * Use a bare-bones client and the command you need to make an API call.
 * ```javascript
diff --git a/clients/client-dynamodb/src/commands/PutItemCommand.ts b/clients/client-dynamodb/src/commands/PutItemCommand.ts
index 3879dbb8e6a11..4b6acbb93868b 100644
--- a/clients/client-dynamodb/src/commands/PutItemCommand.ts
+++ b/clients/client-dynamodb/src/commands/PutItemCommand.ts
@@ -19,74 +19,81 @@ export interface PutItemCommandInput extends PutItemInput {}
 export interface PutItemCommandOutput extends PutItemOutput, __MetadataBearer {}
 /**
+ * Creates a new item, or replaces an old item with a new item. If an item that has the
+ * same primary key as the new item already exists in the specified table, the new item
+ * completely replaces the existing item. You can perform a conditional put operation (add
+ * a new item if one with the specified primary key doesn't exist), or replace an existing
+ * item if it has certain attribute values. You can return the item's attribute values in
+ * the same operation, using the ReturnValues parameter.
+ * This topic provides general information about the PutItem API.
- * For information on how to call the PutItem API using the AWS SDK in specific
- * languages, see the following:
- *   - PutItem in the AWS SDK for .NET
- *   - PutItem in the AWS SDK for C++
- *   - PutItem in the AWS SDK for Go
- *   - PutItem in the AWS SDK for Java
- *   - PutItem in the AWS SDK for Python
+ * For information on how to call the PutItem API using the Amazon Web Services SDK in
+ * specific languages, see the following:
+ *   - PutItem in the SDK for Python (Boto)
+ * When you add an item, the primary key attributes are the only required attributes.
+ * Attribute values cannot be null.
+ * Empty String and Binary attribute values are allowed. Attribute values of type String
+ * and Binary must have a length greater than zero if the attribute is used as a key
+ * attribute for a table or index. Set type attributes cannot be empty.
+ * Invalid Requests with empty values will be rejected with a ValidationException
+ * exception.
+ * To prevent a new item from replacing an existing item, use a conditional expression
+ * that contains the attribute_not_exists function with the name of the attribute being
+ * used as the partition key for the table. Since every record must contain that
+ * attribute, the attribute_not_exists function will only succeed if no matching item
+ * exists.
+ * For more information about PutItem, see Working with Items in the Amazon DynamoDB
+ * Developer Guide.
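Editor's note: a sketch of the attribute_not_exists pattern documented above. The table and key names are hypothetical; ConditionalCheckFailedException is the modeled exception class exported by the client package.

```ts
import {
  ConditionalCheckFailedException,
  DynamoDBClient,
  PutItemCommand,
} from "@aws-sdk/client-dynamodb";

const client = new DynamoDBClient({});

try {
  await client.send(
    new PutItemCommand({
      TableName: "Music",
      Item: { Artist: { S: "Acme Band" }, SongTitle: { S: "Happy Day" } },
      // Fails instead of silently replacing an existing item with this key;
      // Artist is the partition key, so every existing record contains it.
      ConditionExpression: "attribute_not_exists(Artist)",
    })
  );
} catch (e) {
  if (e instanceof ConditionalCheckFailedException) {
    console.log("Item already exists; not overwritten.");
  } else {
    throw e;
  }
}
```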
- * The Query operation finds items based on primary key values. You can query any table
- * or secondary index that has a composite primary key (a partition key and a sort key).
+ * The Query operation finds items based on primary key values.
+ * You must provide the name of the partition key attribute and a single value for that
+ * attribute. Query returns all items with that partition key value. Optionally, you
+ * can provide a sort key attribute and use a comparison operator to refine the search
+ * results.
+ * Use the KeyConditionExpression parameter to provide a specific value for the
+ * partition key. The Query operation will return all of the items from the table or
+ * index with that partition key value. You can optionally narrow the scope of the
+ * Query operation by specifying a sort key value and a comparison operator in
+ * KeyConditionExpression. To further refine the Query results, you can optionally
+ * provide a FilterExpression. A FilterExpression determines which items within the
+ * results should be returned to you. All of the other results are discarded.
+ * A Query operation always returns a result set. If no matching items are found, the
+ * result set will be empty. Queries that do not return results consume the minimum
+ * number of read capacity units for that type of read operation.
+ * DynamoDB calculates the number of read capacity units consumed based on item size,
+ * not on the amount of data that is returned to an application. The number of capacity
+ * units consumed will be the same whether you request all of the attributes (the default
+ * behavior) or just some of them (using a projection expression). The number will also
+ * be the same whether or not you use a FilterExpression.
+ * Query results are always sorted by the sort key value. If the data type of the sort
+ * key is Number, the results are returned in numeric order; otherwise, the results are
+ * returned in order of UTF-8 bytes. By default, the sort order is ascending. To reverse
+ * the order, set the ScanIndexForward parameter to false.
+ * A single Query operation will read up to the maximum number of items set (if using
+ * the Limit parameter) or a maximum of 1 MB of data and then apply any filtering to
+ * the results using FilterExpression. If LastEvaluatedKey is present in the
+ * response, you will need to paginate the result set. For more information, see
+ * Paginating the Results in the Amazon DynamoDB Developer Guide.
+ * FilterExpression is applied after a Query finishes, but before the results are
+ * returned. A FilterExpression cannot contain partition key or sort key attributes.
+ * You need to specify those attributes in the KeyConditionExpression.
+ * A Query operation can return an empty result set and a LastEvaluatedKey if all
+ * the items read for the page of results are filtered out.
+ * You can query a table, a local secondary index, or a global secondary index. For a
+ * query on a table or on a local secondary index, you can set the ConsistentRead
+ * parameter to true and obtain a strongly consistent result. Global secondary indexes
+ * support eventually consistent reads only, so do not specify ConsistentRead when
+ * querying a global secondary index.
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
diff --git a/clients/client-dynamodb/src/commands/RestoreTableFromBackupCommand.ts b/clients/client-dynamodb/src/commands/RestoreTableFromBackupCommand.ts
index b4dc0a0751f51..47462e58fc888 100644
--- a/clients/client-dynamodb/src/commands/RestoreTableFromBackupCommand.ts
+++ b/clients/client-dynamodb/src/commands/RestoreTableFromBackupCommand.ts
@@ -22,30 +22,30 @@ export interface RestoreTableFromBackupCommandInput extends RestoreTableFromBack
export interface RestoreTableFromBackupCommandOutput extends RestoreTableFromBackupOutput, __MetadataBearer {}
/**
- * Creates a new table from an existing backup. Any number of users can execute up to 4 concurrent restores - * (any type of restore) in a given account. - *
- *You can call RestoreTableFromBackup
at a maximum rate of 10 times per second.
You must manually set up the following on the restored table:
- *Creates a new table from an existing backup. Any number of users can execute up to 4 + * concurrent restores (any type of restore) in a given account.
+ *You can call RestoreTableFromBackup
at a maximum rate of 10 times per
+ * second.
You must manually set up the following on the restored table:
+ *Auto scaling policies
- *Auto scaling policies
+ * *IAM policies
- *IAM policies
+ * *Amazon CloudWatch metrics and alarms
- *Amazon CloudWatch metrics and alarms
+ * *Tags
- *Tags
+ * *Stream settings
- *Stream settings
+ * *Time to Live (TTL) settings
- *Time to Live (TTL) settings
+ * *Restores the specified table to the specified point in time within
- * EarliestRestorableDateTime
and LatestRestorableDateTime
.
- * You can restore your table to any point in time during the last 35 days.
- * Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.
- *
- * When you restore using point in time recovery, DynamoDB restores your table data to the state based on - * the selected date and time (day:hour:minute:second) to a new table. - *
- *- * Along with data, the following are also included on the new restored table using point in time recovery: - *
+ *EarliestRestorableDateTime
and LatestRestorableDateTime
.
+ * You can restore your table to any point in time during the last 35 days. Any number of
+ * users can execute up to 4 concurrent restores (any type of restore) in a given account.
+ * When you restore using point in time recovery, DynamoDB restores your table data to + * the state based on the selected date and time (day:hour:minute:second) to a new table.
+ *Along with data, the following are also included on the new restored table using + * point in time recovery:
*Global secondary indexes (GSIs)
- *Global secondary indexes (GSIs)
+ * *Local secondary indexes (LSIs)
- *Local secondary indexes (LSIs)
+ * *Provisioned read and write capacity
- *Provisioned read and write capacity
+ * *Encryption settings
- *- * All these settings come from the current settings of the source table at the time of restore. - *
- *Encryption settings
+ *All these settings come from the current settings of the source table at + * the time of restore.
+ *You must manually set up the following on the restored table:
- *You must manually set up the following on the restored table:
+ *Auto scaling policies
- *Auto scaling policies
+ * *IAM policies
- *IAM policies
+ * *Amazon CloudWatch metrics and alarms
- *Amazon CloudWatch metrics and alarms
+ * *Tags
- *Tags
+ * *Stream settings
- *Stream settings
+ * *Time to Live (TTL) settings
- *Time to Live (TTL) settings
+ * *Point in time recovery settings
- *Point in time recovery settings
+ * *The Scan
operation returns one or more items and item attributes by accessing every
- * item in a table or a secondary index. To have DynamoDB return fewer items, you can provide a FilterExpression
operation.
If the total number of scanned items exceeds the maximum dataset size limit of 1 MB, the
- * scan stops and results are returned to the user as a LastEvaluatedKey
value
- * to continue the scan in a subsequent operation. The results also include the number of
- * items exceeding the limit. A scan can result in no table data meeting the filter
- * criteria.
A single Scan
operation reads up to the maximum number of items set (if
+ *
The Scan
operation returns one or more items and item attributes by
+ * accessing every item in a table or a secondary index. To have DynamoDB return fewer
+ * items, you can provide a FilterExpression
operation.
If the total number of scanned items exceeds the maximum dataset size limit of 1 MB,
+ * the scan stops and results are returned to the user as a LastEvaluatedKey
+ * value to continue the scan in a subsequent operation. The results also include the
+ * number of items exceeding the limit. A scan can result in no table data meeting the
+ * filter criteria.
A single Scan
operation reads up to the maximum number of items set (if
* using the Limit
parameter) or a maximum of 1 MB of data and then apply any
* filtering to the results using FilterExpression
. If
* LastEvaluatedKey
is present in the response, you need to paginate the
* result set. For more information, see Paginating the
* Results in the Amazon DynamoDB Developer Guide.
+ *
* Scan
operations proceed sequentially; however, for faster performance on
* a large table or secondary index, applications can request a parallel Scan
* operation by providing the Segment
and TotalSegments
* parameters. For more information, see Parallel
* Scan in the Amazon DynamoDB Developer Guide.
- * Scan
uses eventually consistent reads when accessing the data in a
- * table; therefore, the result set might not include the changes to data in the table
- * immediately before the operation began. If you need a consistent copy of the data, as of
- * the time that the Scan
begins, you can set the ConsistentRead
- * parameter to true
.
+ * Scan
uses eventually consistent reads when accessing the data in a table;
+ * therefore, the result set might not include the changes to data in the table immediately
+ * before the operation began. If you need a consistent copy of the data, as of the time
+ * that the Scan
begins, you can set the ConsistentRead
parameter
+ * to true
.
For an overview on tagging DynamoDB resources, see - * Tagging for DynamoDB - * in the Amazon DynamoDB Developer Guide.
+ * For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the
+ * Amazon DynamoDB Developer Guide.
 * @example
 * Use a bare-bones client and the command you need to make an API call.
 * ```javascript
diff --git a/clients/client-dynamodb/src/commands/TransactGetItemsCommand.ts b/clients/client-dynamodb/src/commands/TransactGetItemsCommand.ts
index 67cdc72653252..f42e445aa459b 100644
--- a/clients/client-dynamodb/src/commands/TransactGetItemsCommand.ts
+++ b/clients/client-dynamodb/src/commands/TransactGetItemsCommand.ts
@@ -29,23 +29,24 @@ export interface TransactGetItemsCommandOutput extends TransactGetItemsOutput, _
objects, each of which contains a Get
* structure that specifies an item to retrieve from a table in the account and Region. A
* call to TransactGetItems
cannot retrieve items from tables in more than one
- * AWS account or Region. The aggregate size of the items in the transaction cannot exceed
- * 4 MB.
- * DynamoDB rejects the entire TransactGetItems
request if any of the following is true:
DynamoDB rejects the entire TransactGetItems
request if any of
+ * the following is true:
A conflicting operation is in the process of updating an - * item to be read.
+ *A conflicting operation is in the process of updating an item to be + * read.
*There is insufficient provisioned capacity for the transaction - * to be completed.
+ *There is insufficient provisioned capacity for the transaction to be + * completed.
*There is a user error, such as an invalid data format.
+ *There is a user error, such as an invalid data format.
*The aggregate size of the items in the transaction cannot exceed 4 MB.
+ *The aggregate size of the items in the transaction cannot exceed 4 MB.
*
 * TransactWriteItems is a synchronous write operation that groups up to 25
 * action requests. These actions can target items in different tables, but not in
- * different AWS accounts or Regions, and no two actions can target the same item. For
- * example, you cannot both ConditionCheck and Update the same item. The aggregate size
- * of the items in the transaction cannot exceed 4 MB.
+ * different Amazon Web Services accounts or Regions, and no two actions can target the
+ * same item. For example, you cannot both ConditionCheck and Update the same item.
+ * The aggregate size of the items in the transaction cannot exceed 4 MB.
+ * The actions are completed atomically so that either all of them succeed, or all of
+ * them fail. They are defined by the following objects:
+ *   - Put: Initiates a PutItem operation to write a new item. This structure
+ *     specifies the primary key of the item to be written, the name of the table to
+ *     write it in, an optional condition expression that must be satisfied for the
+ *     write to succeed, a list of the item's attributes, and a field indicating whether
+ *     to retrieve the item's attributes if the condition is not met.
+ *   - Update: Initiates an UpdateItem operation to update an existing item. This
+ *     structure specifies the primary key of the item to be updated, the name of the
+ *     table where it resides, an optional condition expression that must be satisfied
+ *     for the update to succeed, an expression that defines one or more attributes to
+ *     be updated, and a field indicating whether to retrieve the item's attributes if
+ *     the condition is not met.
+ *   - Delete: Initiates a DeleteItem operation to delete an existing item. This
+ *     structure specifies the primary key of the item to be deleted, the name of the
+ *     table where it resides, an optional condition expression that must be satisfied
+ *     for the deletion to succeed, and a field indicating whether to retrieve the
+ *     item's attributes if the condition is not met.
+ *   - ConditionCheck: Applies a condition to an item that is not being modified by
+ *     the transaction. This structure specifies the primary key of the item to be
+ *     checked, the name of the table where it resides, a condition expression that
+ *     must be satisfied for the transaction to succeed, and a field indicating whether
+ *     to retrieve the item's attributes if the condition is not met.
+ * DynamoDB rejects the entire TransactWriteItems request if any of the following is
+ * true:
+ *   - A condition in one of the condition expressions is not met.
+ *   - An ongoing operation is in the process of updating the same item.
+ *   - There is insufficient provisioned capacity for the transaction to be completed.
+ *   - An item size becomes too large (bigger than 400 KB), a local secondary index
+ *     (LSI) becomes too large, or a similar validation error occurs because of changes
+ *     made by the transaction.
+ *   - The aggregate size of the items in the transaction exceeds 4 MB.
+ *   - There is a user error, such as an invalid data format.
* UntagResource
up to five times per second, per account.
For an overview on tagging DynamoDB resources, see - * Tagging for DynamoDB - * in the Amazon DynamoDB Developer Guide.
+ * For an overview on tagging DynamoDB resources, see Tagging for DynamoDB in the
+ * Amazon DynamoDB Developer Guide.
 * @example
 * Use a bare-bones client and the command you need to make an API call.
 * ```javascript
diff --git a/clients/client-dynamodb/src/commands/UpdateContinuousBackupsCommand.ts b/clients/client-dynamodb/src/commands/UpdateContinuousBackupsCommand.ts
index 04cee44e20a30..648b49f29b8b1 100644
--- a/clients/client-dynamodb/src/commands/UpdateContinuousBackupsCommand.ts
+++ b/clients/client-dynamodb/src/commands/UpdateContinuousBackupsCommand.ts
@@ -23,15 +23,15 @@ export interface UpdateContinuousBackupsCommandOutput extends UpdateContinuousBa
 /**
+ * UpdateContinuousBackups enables or disables point in time recovery for the specified
+ * table. A successful UpdateContinuousBackups call returns the current
+ * ContinuousBackupsDescription. Continuous backups are ENABLED on all tables at
+ * table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus
+ * will be set to ENABLED.
+ * Once continuous backups and point in time recovery are enabled, you can restore to
+ * any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.
 * LatestRestorableDateTime is typically 5 minutes before the current time.
 * You can restore your table to any point in time during the last 35 days.
- * Updates the status for contributor insights for a specific table or index.
+ * Updates the status for contributor insights for a specific table or index. CloudWatch
+ * Contributor Insights for DynamoDB graphs display the partition key and (if applicable)
+ * sort key of frequently accessed items and frequently throttled items in plaintext. If
+ * you require the use of AWS Key Management Service (KMS) to encrypt this table's
+ * partition key and sort key data with an AWS managed key or customer managed key, you
+ * should not enable CloudWatch Contributor Insights for DynamoDB for this table.
 * @example
 * Use a bare-bones client and the command you need to make an API call.
 * ```javascript
diff --git a/clients/client-dynamodb/src/commands/UpdateGlobalTableCommand.ts b/clients/client-dynamodb/src/commands/UpdateGlobalTableCommand.ts
index 549783b2600ad..80d117cf64027 100644
--- a/clients/client-dynamodb/src/commands/UpdateGlobalTableCommand.ts
+++ b/clients/client-dynamodb/src/commands/UpdateGlobalTableCommand.ts
@@ -27,28 +27,23 @@ export interface UpdateGlobalTableCommandOutput extends UpdateGlobalTableOutput,
 * same name as the global table, have the same key schema, have DynamoDB Streams enabled,
 * and have the same provisioned and maximum write capacity units.
+ * Although you can use UpdateGlobalTable to add replicas and remove replicas in a
+ * single request, for simplicity we recommend that you issue separate requests for
+ * adding or removing replicas.
+ * If global secondary indexes are specified, then the following conditions must also be
+ * met:
+ *   - The global secondary indexes must have the same name.
+ *   - The global secondary indexes must have the same hash key and sort key (if
+ *     present).
+ *   - The global secondary indexes must have the same provisioned and maximum write
+ *     capacity units.
+ * Edits an existing item's attributes, or adds a new item to the table if it does not
+ * already exist. You can put, delete, or add attribute values. You can also perform a
+ * conditional update on an existing item (insert a new attribute name-value pair if it
+ * doesn't exist, or replace an existing name-value pair if it has certain expected
+ * attribute values).
+ * You can also return the item's attribute values in the same UpdateItem operation
+ * using the ReturnValues parameter.
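Editor's note: a sketch of an in-place edit with `ReturnValues`, per the text above; names are hypothetical.

```ts
import { DynamoDBClient, UpdateItemCommand } from "@aws-sdk/client-dynamodb";

const client = new DynamoDBClient({});

const out = await client.send(
  new UpdateItemCommand({
    TableName: "Music",
    Key: { Artist: { S: "Acme Band" }, SongTitle: { S: "Happy Day" } },
    // SET creates or replaces an attribute; ADD atomically increments a number.
    UpdateExpression: "SET AlbumTitle = :title ADD Plays :one",
    ExpressionAttributeValues: { ":title": { S: "Greatest Hits" }, ":one": { N: "1" } },
    ReturnValues: "UPDATED_NEW", // only the attributes this update changed
  })
);
console.log(out.Attributes);
```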
+ * Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB
+ * Streams settings for a given table.
+ * You can only perform one of the following operations at once:
+ *   - Modify the provisioned throughput settings of the table.
+ *   - Enable or disable DynamoDB Streams on the table.
+ *   - Remove a global secondary index from the table.
+ *   - Create a new global secondary index on the table. After the index begins
+ *     backfilling, you can use UpdateTable to perform other operations.
+ * UpdateTable is an asynchronous operation; while it is executing, the table status
+ * changes from ACTIVE to UPDATING. While it is UPDATING, you cannot issue another
+ * UpdateTable request. When the table returns to the ACTIVE state, the
+ * UpdateTable operation is complete.
+ * Updates auto scaling settings on your global tables at once.
+ * This operation only applies to Version 2019.11.21 of global tables.
+ *The UpdateTimeToLive
method enables or disables Time to Live (TTL) for the
- * specified table. A successful UpdateTimeToLive
call returns the current
+ *
The UpdateTimeToLive
method enables or disables Time to Live (TTL) for
+ * the specified table. A successful UpdateTimeToLive
call returns the current
* TimeToLiveSpecification
. It can take up to one hour for the change to
* fully process. Any additional UpdateTimeToLive
calls for the same table
* during this one hour duration result in a ValidationException
.
 *TTL compares the current time in epoch time format to the time stored in the TTL
 * attribute of an item. If the epoch time value stored in the attribute is less than the
 * current time, the item is marked as expired and subsequently deleted.
- *The epoch time format is the number of seconds elapsed since 12:00:00 AM January 1,
- * 1970 UTC.
+ *The epoch time format is the number of seconds elapsed since 12:00:00 AM January
+ * 1, 1970 UTC.
- *DynamoDB deletes expired items on a best-effort basis to ensure availability of
- * throughput for other data operations.
- *DynamoDB typically deletes expired items within two days of expiration. The exact duration
- * within which an item gets deleted after expiration is specific to the
- * nature of the workload. Items that have expired and not been deleted will still show up in reads,
- * queries, and scans.
+ *DynamoDB typically deletes expired items within two days of expiration. The exact
+ * duration within which an item gets deleted after expiration is specific to the
+ * nature of the workload. Items that have expired and not been deleted will still show
+ * up in reads, queries, and scans.
+ *As items are deleted, they are removed from any local secondary index and global
+ * secondary index immediately in the same eventually consistent way as a standard delete
+ * operation.
- *For more information, see Time To Live in the Amazon DynamoDB Developer Guide.
+ *For more information, see Time To Live in the
+ * Amazon DynamoDB Developer Guide.
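As a sketch of the TTL workflow described above (table and attribute names are hypothetical), enabling TTL with `@aws-sdk/client-dynamodb` v3 looks roughly like this:

```ts
import { DynamoDBClient, UpdateTimeToLiveCommand } from "@aws-sdk/client-dynamodb";

async function enableTtl(): Promise<void> {
  const client = new DynamoDBClient({});

  // "expiresAt" must hold an epoch-seconds timestamp; items whose value is
  // in the past are marked expired and deleted on a best-effort basis.
  const { TimeToLiveSpecification } = await client.send(
    new UpdateTimeToLiveCommand({
      TableName: "Sessions", // hypothetical table name
      TimeToLiveSpecification: { Enabled: true, AttributeName: "expiresAt" },
    })
  );

  // A successful call echoes the accepted specification back.
  console.log(TimeToLiveSpecification);
}
```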
 * @example
 * Use a bare-bones client and the command you need to make an API call.
 * ```javascript
diff --git a/clients/client-dynamodb/src/endpoints.ts b/clients/client-dynamodb/src/endpoints.ts
index e84c3c8d796a9..686b5d717dd51 100644
--- a/clients/client-dynamodb/src/endpoints.ts
+++ b/clients/client-dynamodb/src/endpoints.ts
@@ -178,6 +178,10 @@ const partitionHash: PartitionHash = {
        hostname: "dynamodb.{region}.c2s.ic.gov",
        tags: [],
      },
+      {
+        hostname: "dynamodb-fips.{region}.c2s.ic.gov",
+        tags: ["fips"],
+      },
    ],
  },
  "aws-iso-b": {
@@ -188,6 +192,10 @@ const partitionHash: PartitionHash = {
        hostname: "dynamodb.{region}.sc2s.sgov.gov",
        tags: [],
      },
+      {
+        hostname: "dynamodb-fips.{region}.sc2s.sgov.gov",
+        tags: ["fips"],
+      },
    ],
  },
  "aws-us-gov": {
diff --git a/clients/client-dynamodb/src/models/models_0.ts b/clients/client-dynamodb/src/models/models_0.ts
index 81be17fe15688..22c3faf937fa7 100644
--- a/clients/client-dynamodb/src/models/models_0.ts
+++ b/clients/client-dynamodb/src/models/models_0.ts
@@ -5,32 +5,29 @@ import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException
 */
export interface ArchivalSummary {
  /**
- *The date and time when table archival was initiated by DynamoDB,
- * in UNIX epoch time format.
+ *The date and time when table archival was initiated by DynamoDB, in UNIX epoch time
+ * format.
 */
  ArchivalDateTime?: Date;

  /**
- *The reason DynamoDB archived the table. Currently, the only
- * possible value is:
+ *The reason DynamoDB archived the table. Currently, the only possible value is:
- * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The
- * table was archived due to the table's AWS KMS key being inaccessible
- * for more than seven days. An On-Demand backup was created at the archival
- * time.
+ * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The table was archived due
+ * to the table's KMS key being inaccessible for more than seven
+ * days. An On-Demand backup was created at the archival time.
- *The Amazon Resource Name (ARN) of the backup the table was archived
- * to, when applicable in the archival reason. If you wish to restore this
- * backup to the same table name, you will need to delete the original
- * table.
+ *The Amazon Resource Name (ARN) of the backup the table was archived to, when
+ * applicable in the archival reason. If you wish to restore this backup to the same table
+ * name, you will need to delete the original table.
 */
  ArchivalBackupArn?: string;
}

@@ -49,7 +46,8 @@

export type AttributeAction = "ADD" | "DELETE" | "PUT";

export type ScalarAttributeType = "B" | "N" | "S";

/**
- *Represents an attribute for describing the key schema for the table and indexes.
+ *Represents an attribute for describing the key schema for the table and
+ * indexes.
 */
export interface AttributeDefinition {
  /**
@@ -59,18 +57,18 @@ export interface AttributeDefinition {
  /**
 *The data type for the attribute, where:
- * S - the attribute is of type String
+ * S - the attribute is of type String
- * N - the attribute is of type Number
+ * N - the attribute is of type Number
- * B - the attribute is of type Binary
+ * B - the attribute is of type Binary
- *Indicates whether scale in by the target tracking policy is disabled. If the value is true,
- * scale in is disabled and the target tracking policy won't remove capacity from the scalable resource.
- * Otherwise, scale in is enabled and the target tracking policy can remove capacity from the scalable resource.
- * The default value is false.
+ *Indicates whether scale in by the target tracking policy is disabled. If the value is
+ * true, scale in is disabled and the target tracking policy won't remove capacity from the
+ * scalable resource. Otherwise, scale in is enabled and the target tracking policy can
+ * remove capacity from the scalable resource. The default value is false.
 */
  DisableScaleIn?: boolean;

  /**
- *The amount of time, in seconds, after a scale in activity completes before another scale
- * in activity can start. The cooldown period is used to block subsequent scale in requests
- * until it has expired. You should scale in conservatively to protect your application's
- * availability. However, if another alarm triggers a scale out policy during the cooldown
- * period after a scale-in, application auto scaling scales out your scalable target
- * immediately.
+ *The amount of time, in seconds, after a scale in activity completes before another
+ * scale in activity can start. The cooldown period is used to block subsequent scale in
+ * requests until it has expired. You should scale in conservatively to protect your
+ * application's availability. However, if another alarm triggers a scale out policy during
+ * the cooldown period after a scale-in, application auto scaling scales out your scalable
+ * target immediately.
 */
  ScaleInCooldown?: number;

  /**
- *The amount of time, in seconds, after a scale out activity completes before another scale out
- * activity can start. While the cooldown period is in effect, the capacity that has been added
- * by the previous scale out event that initiated the cooldown is calculated as part of the
- * desired capacity for the next scale out. You should continuously (but not excessively)
- * scale out.
+ *The amount of time, in seconds, after a scale out activity completes before another
+ * scale out activity can start. While the cooldown period is in effect, the capacity that
+ * has been added by the previous scale out event that initiated the cooldown is calculated
+ * as part of the desired capacity for the next scale out. You should continuously (but not
+ * excessively) scale out.
 */
  ScaleOutCooldown?: number;

  /**
- *The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2).
+ *The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 (Base 10)
+ * or 2e-360 to 2e360 (Base 2).
+ * index. */ export interface AutoScalingSettingsDescription { /** - *The minimum capacity units that a global table or global secondary index should be scaled down to.
+ *The minimum capacity units that a global table or global secondary index should be + * scaled down to.
*/ MinimumUnits?: number; /** - *The maximum capacity units that a global table or global secondary index should be scaled up to.
+ *The maximum capacity units that a global table or global secondary index should be + * scaled up to.
*/ MaximumUnits?: number; @@ -268,16 +271,18 @@ export namespace AutoScalingSettingsDescription { /** *Represents the auto scaling settings to be modified for a global table or global - * secondary index.
+ * secondary index. */ export interface AutoScalingSettingsUpdate { /** - *The minimum capacity units that a global table or global secondary index should be scaled down to.
+ *The minimum capacity units that a global table or global secondary index should be + * scaled down to.
*/ MinimumUnits?: number; /** - *The maximum capacity units that a global table or global secondary index should be scaled up to.
+ *The maximum capacity units that a global table or global secondary index should be + * scaled up to.
*/ MaximumUnits?: number; @@ -292,7 +297,8 @@ export interface AutoScalingSettingsUpdate { AutoScalingRoleArn?: string; /** - *The scaling policy to apply for scaling target global table or global secondary index capacity units.
+ *The scaling policy to apply for scaling target global table or global secondary index + * capacity units.
*/ ScalingPolicyUpdate?: AutoScalingPolicyUpdate; } @@ -340,21 +346,22 @@ export interface BackupDetails { /** *BackupType:
- *
- * USER
- You create and manage these using the on-demand backup feature.
USER
- You create and manage these using the on-demand backup
+ * feature.
*
- * SYSTEM
- If you delete a table with point-in-time recovery enabled, a SYSTEM
backup is automatically
- * created and is retained for 35 days (at no additional cost). System backups allow you to restore the deleted
- * table to the state it was in just before the point of deletion.
- *
SYSTEM
- If you delete a table with point-in-time recovery enabled,
+ * a SYSTEM
backup is automatically created and is retained for 35
+ * days (at no additional cost). System backups allow you to restore the deleted
+ * table to the state it was in just before the point of deletion.
*
- * AWS_BACKUP
- On-demand backup created by you from AWS Backup service.
AWS_BACKUP
- On-demand backup created by you from Backup service.
* Time at which the automatic on-demand backup created by DynamoDB will expire. This SYSTEM
- * on-demand backup expires automatically 35 days after its creation.
Time at which the automatic on-demand backup created by DynamoDB will
+ * expire. This SYSTEM
on-demand backup expires automatically 35 days after
+ * its creation.
Represents a single element of a key schema. A key schema specifies the attributes - * that make up the primary key of a table, or the key attributes of an index.
- *A KeySchemaElement
represents exactly one attribute of the primary key. For example, a
- * simple primary key would be represented by one KeySchemaElement
(for the partition key). A composite
- * primary key would require one KeySchemaElement
for the partition key, and another
- * KeySchemaElement
for the sort key.
A KeySchemaElement
must be a scalar, top-level attribute (not a nested attribute). The data type must be one of String, Number, or Binary. The attribute cannot be nested within a List or a Map.
Represents a single element of a key schema. A key schema + * specifies the attributes that make up the primary key of a table, or the key attributes + * of an index.
+ *A KeySchemaElement
represents exactly one attribute of the primary key.
+ * For example, a simple primary key would be represented by one
+ * KeySchemaElement
(for the partition key). A composite primary key would
+ * require one KeySchemaElement
for the partition key, and another
+ * KeySchemaElement
for the sort key.
A KeySchemaElement
must be a scalar, top-level attribute (not a nested
+ * attribute). The data type must be one of String, Number, or Binary. The attribute cannot
+ * be nested within a List or a Map.
The role that this key attribute will assume:
- *
- * HASH
- partition key
+ * HASH
- partition key
- * RANGE
- sort key
+ * RANGE
- sort key
The partition key of an item is also known as its hash attribute. The - * term "hash attribute" derives from DynamoDB's usage of an internal hash function to - * evenly distribute data items across partitions, based on their partition key values.
+ *The partition key of an item is also known as its hash + * attribute. The term "hash attribute" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across + * partitions, based on their partition key values.
*The sort key of an item is also known as its range attribute. - * The term "range attribute" derives from the way DynamoDB stores items with the same - * partition key physically close together, in sorted order by the sort key value.
- *Represents the provisioned throughput settings for a specified table or index. The settings
- * can be modified using the UpdateTable
operation.
For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide.
+ *Represents the provisioned throughput settings for a specified table or index. The
+ * settings can be modified using the UpdateTable
operation.
For current minimum and maximum provisioned throughput values, see Service, + * Account, and Table Quotas in the Amazon DynamoDB Developer + * Guide.
*/ export interface ProvisionedThroughput { /** - *The maximum number of strongly consistent reads consumed per second before DynamoDB returns a
- * ThrottlingException
. For more information, see Specifying Read and Write
- * Requirements in the Amazon DynamoDB Developer Guide.
If read/write capacity mode is PAY_PER_REQUEST
the value is set to 0.
The maximum number of strongly consistent reads consumed per second before DynamoDB
+ * returns a ThrottlingException
. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB
+ * Developer Guide.
If read/write capacity mode is PAY_PER_REQUEST
the value is set to
+ * 0.
The maximum number of writes consumed per second before DynamoDB returns a
- * ThrottlingException
. For more information, see Specifying Read and Write
- * Requirements in the Amazon DynamoDB Developer Guide.
If read/write capacity mode is PAY_PER_REQUEST
the value is set to 0.
ThrottlingException
. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB
+ * Developer Guide.
+ * If read/write capacity mode is PAY_PER_REQUEST
the value is set to
+ * 0.
Controls how you are charged for read and write throughput and how you manage capacity. This setting can be changed later.
- *Controls how you are charged for read and write throughput and how you manage capacity. This setting can be changed later.
+ *Controls how you are charged for read and write throughput and how you manage
+ * capacity. This setting can be changed later.
- * PROVISIONED - Sets the read/write capacity mode to PROVISIONED. We recommend using PROVISIONED for predictable workloads.
+ * PROVISIONED - Sets the read/write capacity mode to
+ * PROVISIONED. We recommend using PROVISIONED for
+ * predictable workloads.
- * PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST. We recommend using PAY_PER_REQUEST for unpredictable workloads.
+ * PAY_PER_REQUEST - Sets the read/write capacity mode to
+ * PAY_PER_REQUEST. We recommend using
+ * PAY_PER_REQUEST for unpredictable workloads.
- *Represents attributes that are copied (projected) from the table into an index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
+ *Represents attributes that are copied (projected) from the table into an index. These
+ * are in addition to the primary key attributes and index key attributes, which are
+ * automatically projected.
 */
export interface Projection {
  /**
 *The set of attributes that are projected into the index:
- * KEYS_ONLY - Only the index and primary keys are projected into the
- * index.
+ * KEYS_ONLY - Only the index and primary keys are projected into the
+ * index.
- * INCLUDE - In addition to the attributes described in KEYS_ONLY, the secondary index will include other non-key attributes that you specify.
+ * INCLUDE - In addition to the attributes described in
+ * KEYS_ONLY, the secondary index will include other non-key
+ * attributes that you specify.
- * ALL - All of the table attributes are projected into the index.
+ * ALL - All of the table attributes are projected into the
+ * index.
 *Represents the non-key attribute names which will be projected into the index.
- *For local secondary indexes, the total count of NonKeyAttributes summed across all of the local secondary indexes,
- * must not exceed 20. If you project the same attribute into two
- * different indexes, this counts as two distinct attributes when determining the total.
+ *For local secondary indexes, the total count of NonKeyAttributes summed
+ * across all of the local secondary indexes, must not exceed 20. If you project the same
+ * attribute into two different indexes, this counts as two distinct attributes when
+ * determining the total.
- *Represents the properties of a global secondary index for the table
- * when the backup was created.
+ *Represents the properties of a global secondary index for the table when the backup
+ * was created.
 */
export interface GlobalSecondaryIndexInfo {
  /**
@@ -590,38 +617,40 @@ export interface GlobalSecondaryIndexInfo {
  IndexName?: string;

  /**
- *The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types:
+ *The complete key schema for a global secondary index, which consists of one or more
+ * pairs of attribute names and key types:
- * HASH - partition key
+ * HASH - partition key
- * RANGE - sort key
+ * RANGE - sort key
- *The partition key of an item is also known as its hash attribute. The
- * term "hash attribute" derives from DynamoDB's usage of an internal hash function to
- * evenly distribute data items across partitions, based on their partition key values.
+ *The partition key of an item is also known as its hash
+ * attribute. The term "hash attribute" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across
+ * partitions, based on their partition key values.
 *The sort key of an item is also known as its range attribute.
 * The term "range attribute" derives from the way DynamoDB stores items with the same
 * partition key physically close together, in sorted order by the sort key value.
- *Represents attributes that are copied (projected) from the table into
- * the global secondary index. These are in addition to the primary
- * key attributes and index key attributes, which are automatically
- * projected.
+ *Represents attributes that are copied (projected) from the table into the global
+ * secondary index. These are in addition to the primary key attributes and index key
+ * attributes, which are automatically projected.
 */
  Projection?: Projection;

  /**
- *Represents the provisioned throughput settings for the specified global secondary index.
+ *Represents the provisioned throughput settings for the specified global secondary
+ * index.
 */
  ProvisionedThroughput?: ProvisionedThroughput;
}

@@ -636,8 +665,8 @@

/**
- *Represents the properties of a local secondary index for the table
- * when the backup was created.
+ *Represents the properties of a local secondary index for the table when the backup was
+ * created.
 */
export interface LocalSecondaryIndexInfo {
  /**
@@ -646,30 +675,35 @@ export interface LocalSecondaryIndexInfo {
  IndexName?: string;

  /**
- *The complete key schema for a local secondary index, which consists of one or more pairs of attribute names and key types:
+ *The complete key schema for a local secondary index, which consists of one or more
+ * pairs of attribute names and key types:
- * HASH - partition key
+ * HASH - partition key
- * RANGE - sort key
+ * RANGE - sort key
- *The partition key of an item is also known as its hash attribute. The
- * term "hash attribute" derives from DynamoDB's usage of an internal hash function to
- * evenly distribute data items across partitions, based on their partition key values.
+ *The partition key of an item is also known as its hash
+ * attribute. The term "hash attribute" derives from DynamoDB's usage of
+ * an internal hash function to evenly distribute data items across partitions, based
+ * on their partition key values.
 *The sort key of an item is also known as its range attribute.
 * The term "range attribute" derives from the way DynamoDB stores items with the same
 * partition key physically close together, in sorted order by the sort key value.
- *Represents attributes that are copied (projected) from the table into the global secondary index. These are in addition to the primary key attributes and index key attributes, which are automatically projected.
+ *Represents attributes that are copied (projected) from the table into the global
+ * secondary index. These are in addition to the primary key attributes and index key
+ * attributes, which are automatically projected.
 */
  Projection?: Projection;
}
@@ -692,15 +726,16 @@

/**
- *Represents the current state of server-side encryption. The only supported values are:
+ *Represents the current state of server-side encryption. The only supported values
+ * are:
- * ENABLED - Server-side encryption is enabled.
+ * ENABLED - Server-side encryption is enabled.
- * UPDATING - Server-side encryption is being updated.
+ * UPDATING - Server-side encryption is being updated.
 *Server-side encryption type. The only supported value is:
- * KMS - Server-side encryption that uses AWS Key Management Service. The
- * key is stored in your account and is managed by AWS KMS (AWS KMS charges
- * apply).
+ * KMS - Server-side encryption that uses Key Management Service. The
+ * key is stored in your account and is managed by KMS (KMS charges apply).
- *The AWS KMS customer master key (CMK) ARN used for the AWS KMS encryption.
+ *The KMS key ARN used for the KMS
+ * encryption.
 */
  KMSMasterKeyArn?: string;

  /**
- *Indicates the time, in UNIX epoch date format, when DynamoDB detected that the table's
- * AWS KMS key was inaccessible. This attribute will automatically be cleared when DynamoDB
- * detects that the table's AWS KMS key is accessible again. DynamoDB will initiate the table
- * archival process when table's AWS KMS key remains inaccessible for more than seven days
- * from this date.
+ *Indicates the time, in UNIX epoch date format, when DynamoDB detected that
+ * the table's KMS key was inaccessible. This attribute will automatically
+ * be cleared when DynamoDB detects that the table's KMS key is accessible
+ * again. DynamoDB will initiate the table archival process when table's KMS key remains inaccessible for more than seven days from this date.
 */
  InaccessibleEncryptionDateTime?: Date;
}

@@ -750,35 +784,36 @@

export type StreamViewType = "KEYS_ONLY" | "NEW_AND_OLD_IMAGES" | "NEW_IMAGE" | "OLD_IMAGE";

/**
 */
export interface StreamSpecification {
  /**
- *Indicates whether DynamoDB Streams is enabled (true) or disabled (false) on the table.
+ *Indicates whether DynamoDB Streams is enabled (true) or disabled (false) on the
+ * table.
 */
  StreamEnabled: boolean | undefined;

  /**
- * When an item in the table is modified, StreamViewType
- * determines what information is written to the stream for this table. Valid values for
- * StreamViewType are:
- * KEYS_ONLY - Only the key attributes of the modified item are written to the stream.
- * NEW_IMAGE - The entire item, as it appears after it was modified, is written
- * to the stream.
- * OLD_IMAGE - The entire item, as it appeared before it was modified, is
- * written to the stream.
- * NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are
- * written to the stream.
+ *When an item in the table is modified, StreamViewType determines what
+ * information is written to the stream for this table. Valid values for
+ * StreamViewType are:
+ * KEYS_ONLY - Only the key attributes of the modified item are
+ * written to the stream.
+ * NEW_IMAGE - The entire item, as it appears after it was modified,
+ * is written to the stream.
+ * OLD_IMAGE - The entire item, as it appeared before it was modified,
+ * is written to the stream.
+ * NEW_AND_OLD_IMAGES - Both the new and the old item images of the
+ * item are written to the stream.
- *Contains the details of the features enabled on the table when the backup was created. For example, LSIs, GSIs, streams, TTL.
+ *Contains the details of the features enabled on the table when the backup was created.
+ * For example, LSIs, GSIs, streams, TTL.
 */
export interface SourceTableFeatureDetails {
  /**
- *Represents the LSI properties for the table when the backup was created. It includes the IndexName, KeySchema and Projection for the LSIs on the table at the time of backup.
+ *Represents the LSI properties for the table when the backup was created. It includes
+ * the IndexName, KeySchema and Projection for the LSIs on the table at the time of backup.
 */
  LocalSecondaryIndexes?: LocalSecondaryIndexInfo[];

  /**
- *Represents the GSI properties for the table when the backup was created. It includes the
- * IndexName, KeySchema, Projection, and ProvisionedThroughput for the GSIs on the table at
- * the time of backup.
+ *Represents the GSI properties for the table when the backup was created. It includes
+ * the IndexName, KeySchema, Projection, and ProvisionedThroughput for the GSIs on the
+ * table at the time of backup.
 */
  GlobalSecondaryIndexes?: GlobalSecondaryIndexInfo[];

@@ -846,7 +884,8 @@ export interface SourceTableFeatureDetails {
  TimeToLiveDescription?: TimeToLiveDescription;

  /**
- *The description of the server-side encryption status on the table when the backup was created.
+ *The description of the server-side encryption status on the table when the backup was
+ * created.
 */
  SSEDescription?: SSEDescription;
}

@@ -875,7 +914,8 @@ export interface BackupDescription {
  SourceTableDetails?: SourceTableDetails;

  /**
- *Contains the details of the features enabled on the table when the backup was created. For example, LSIs, GSIs, streams, TTL.
+ *Contains the details of the features enabled on the table when the backup was created.
+ * For example, LSIs, GSIs, streams, TTL.
 */
  SourceTableFeatureDetails?: SourceTableFeatureDetails;
}

@@ -890,7 +930,8 @@ export namespace BackupDescription {
}

/**
- *There is another ongoing conflicting backup control plane operation on the table. The backup is either being created, deleted or restored to a table.
+ *There is another ongoing conflicting backup control plane operation on the table.
+ * The backup is either being created, deleted or restored to a table.
 */
export interface BackupInUseException extends __SmithyException, $MetadataBearer {
  name: "BackupInUseException";
@@ -960,9 +1001,9 @@ export interface BackupSummary {
  BackupCreationDateTime?: Date;

  /**
- *Time at which the automatic on-demand backup created by DynamoDB will expire. This
- * SYSTEM on-demand backup expires automatically 35 days after its
- * creation.
+ *Time at which the automatic on-demand backup created by DynamoDB will
+ * expire. This SYSTEM on-demand backup expires automatically 35 days after
+ * its creation.
- You create and manage these using the on-demand backup feature.
USER
- You create and manage these using the on-demand backup
+ * feature.
*
- * SYSTEM
- If you delete a table with point-in-time recovery enabled, a SYSTEM
backup is automatically
- * created and is retained for 35 days (at no additional cost). System backups allow you to restore the deleted
- * table to the state it was in just before the point of deletion.
- *
SYSTEM
- If you delete a table with point-in-time recovery enabled,
+ * a SYSTEM
backup is automatically created and is retained for 35
+ * days (at no additional cost). System backups allow you to restore the deleted
+ * table to the state it was in just before the point of deletion.
*
- * AWS_BACKUP
- On-demand backup created by you from AWS Backup service.
AWS_BACKUP
- On-demand backup created by you from Backup service.
* - * An error associated with a statement in a PartiQL batch that was run. - *
+ *An error associated with a statement in a PartiQL batch that was run.
*/ export interface BatchStatementError { /** - *- * The error code associated with the failed PartiQL batch statement. - *
+ *The error code associated with the failed PartiQL batch statement.
*/ Code?: BatchStatementErrorCodeEnum | string; /** - *- * The error message associated with the PartiQL batch resposne. - *
+ *The error message associated with the PartiQL batch resposne.
*/ Message?: string; } @@ -1081,7 +1117,9 @@ export namespace InternalServerError { } /** - *Throughput exceeds the current throughput quota for your account. Please contact AWS Support at AWS Support to request a quota increase.
+ *Throughput exceeds the current throughput quota for your account. Please contact + * Amazon Web Services Support to request a + * quota increase.
*/ export interface RequestLimitExceeded extends __SmithyException, $MetadataBearer { name: "RequestLimitExceeded"; @@ -1101,7 +1139,8 @@ export namespace RequestLimitExceeded { export type ReturnConsumedCapacity = "INDEXES" | "NONE" | "TOTAL"; /** - *Represents the amount of provisioned throughput capacity consumed on a table or an index.
+ *Represents the amount of provisioned throughput capacity consumed on a table or an + * index.
*/ export interface Capacity { /** @@ -1131,10 +1170,10 @@ export namespace Capacity { /** *The capacity units consumed by an operation. The data returned includes the total
- * provisioned throughput consumed, along with statistics for the table and any indexes involved
- * in the operation. ConsumedCapacity
is only returned if the request asked for it.
- * For more information, see Provisioned
- * Throughput in the Amazon DynamoDB Developer Guide.
ConsumedCapacity
is only returned if the request
+ * asked for it. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer
+ * Guide.
*/
export interface ConsumedCapacity {
/**
@@ -1163,12 +1202,14 @@ export interface ConsumedCapacity {
Table?: Capacity;
/**
- * The amount of throughput consumed on each local index affected by the operation.
- * The amount of throughput consumed on each local index affected by the operation.
+ *The amount of throughput consumed on each local index affected by the
+ * operation.
 */
  LocalSecondaryIndexes?: { [key: string]: Capacity };

  /**
- *The amount of throughput consumed on each global index affected by the operation.
+ *The amount of throughput consumed on each global index affected by the
+ * operation.
 */
  GlobalSecondaryIndexes?: { [key: string]: Capacity };
}

@@ -1198,11 +1239,10 @@ export namespace InvalidEndpointException {
}

/**
- *Your request rate is too high. The AWS SDKs for DynamoDB automatically retry requests that
- * receive this exception. Your request is eventually successful, unless your retry queue is too
- * large to finish. Reduce the frequency of requests and use exponential backoff. For more
- * information, go to Error Retries and Exponential
- * Backoff in the Amazon DynamoDB Developer Guide.
+ *Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
+ * automatically retry requests that receive this exception. Your request is eventually
+ * successful, unless your retry queue is too large to finish. Reduce the frequency of
+ * requests and use exponential backoff. For more information, go to Error Retries and Exponential Backoff in the Amazon DynamoDB Developer Guide.
 */
export interface ProvisionedThroughputExceededException extends __SmithyException, $MetadataBearer {
  name: "ProvisionedThroughputExceededException";
@@ -1223,8 +1263,8 @@ export namespace ProvisionedThroughputExceededException {
}

/**
- *The operation tried to access a nonexistent table or index. The resource might not be specified
- * correctly, or its status might not be ACTIVE.
+ *The operation tried to access a nonexistent table or index. The resource might not
+ * be specified correctly, or its status might not be ACTIVE.
- *An item collection is too large. This exception is only returned for tables that have one or more local secondary indexes.
+ *An item collection is too large. This exception is only returned for tables that
+ * have one or more local secondary indexes.
 */
export interface ItemCollectionSizeLimitExceededException extends __SmithyException, $MetadataBearer {
  name: "ItemCollectionSizeLimitExceededException";
  $fault: "client";
  /**
- *The total size of an item collection has exceeded the maximum limit of 10 gigabytes.
+ *The total size of an item collection has exceeded the maximum limit of 10
+ * gigabytes.
 */
  message?: string;
}
@@ -1272,23 +1314,28 @@

/**
 */
export interface BillingModeSummary {
  /**
- *Controls how you are charged for read and write throughput and how you manage capacity. This setting can be changed later.
+ *Controls how you are charged for read and write throughput and how you manage
+ * capacity. This setting can be changed later.
- * PROVISIONED - Sets the read/write capacity mode to PROVISIONED. We recommend using PROVISIONED for predictable workloads.
+ * PROVISIONED - Sets the read/write capacity mode to
+ * PROVISIONED. We recommend using PROVISIONED for
+ * predictable workloads.
- * PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST. We recommend using PAY_PER_REQUEST for unpredictable workloads.
+ * PAY_PER_REQUEST - Sets the read/write capacity mode to
+ * PAY_PER_REQUEST. We recommend using
+ * PAY_PER_REQUEST for unpredictable workloads.
- *Represents the time when PAY_PER_REQUEST was last set as the read/write capacity mode.
+ *Represents the time when PAY_PER_REQUEST was last set as the read/write
+ * capacity mode.
- *
- * ENABLING
- Point in time recovery is being enabled.
+ * ENABLING
- Point in time recovery is being enabled.
- * ENABLED
- Point in time recovery is enabled.
+ * ENABLED
- Point in time recovery is enabled.
- * DISABLED
- Point in time recovery is disabled.
+ * DISABLED
- Point in time recovery is disabled.
Specifies the earliest point in time you can restore your table to. You can restore your - * table to any point in time during the last 35 days.
+ *Specifies the earliest point in time you can restore your table to. You can restore + * your table to any point in time during the last 35 days.
*/ EarliestRestorableDateTime?: Date; /** *
* LatestRestorableDateTime
is typically 5 minutes before the current time.
- *
Represents the continuous backups and point in time recovery settings on the table.
+ *Represents the continuous backups and point in time recovery settings on the + * table.
*/ export interface ContinuousBackupsDescription { /** *
* ContinuousBackupsStatus
can be one of the following states: ENABLED,
- * DISABLED
Describes the current status for contributor insights for the given table and index, if applicable.
+ *Describes the current status for contributor insights for the given table and index, + * if applicable.
*/ ContributorInsightsStatus?: ContributorInsightsStatus | string; } @@ -1511,10 +1560,11 @@ export namespace CreateBackupOutput { *Up to 50 simultaneous table operations are allowed per account. These operations
* include CreateTable
, UpdateTable
,
* DeleteTable
,UpdateTimeToLive
,
- * RestoreTableFromBackup
, and RestoreTableToPointInTime
.
The only exception is when you are creating a table with one or more secondary indexes. You can have up to - * 25 such requests running at a time; however, if the table or index specifications are complex, DynamoDB might temporarily - * reduce the number of concurrent operations.
+ *RestoreTableFromBackup
, and RestoreTableToPointInTime
.
+ * The only exception is when you are creating a table with one or more secondary + * indexes. You can have up to 25 such requests running at a time; however, if the table or + * index specifications are complex, DynamoDB might temporarily reduce the number + * of concurrent operations.
*There is a soft account quota of 256 tables.
*/ export interface LimitExceededException extends __SmithyException, $MetadataBearer { @@ -1536,7 +1586,8 @@ export namespace LimitExceededException { } /** - *A target table with the specified name is either being created or deleted.
+ *A target table with the specified name is either being created or deleted. + *
*/ export interface TableInUseException extends __SmithyException, $MetadataBearer { name: "TableInUseException"; @@ -1554,7 +1605,8 @@ export namespace TableInUseException { } /** - *A source table with the name TableName
does not currently exist within the subscriber's account.
A source table with the name TableName
does not currently exist within
+ * the subscriber's account.
Represents attributes that are copied (projected) from the table into an index. These - * are in addition to the primary key attributes and index key attributes, which are - * automatically projected.
+ * are in addition to the primary key attributes and index key attributes, which are + * automatically projected. */ Projection: Projection | undefined; /** - *Represents the provisioned throughput settings for the specified global secondary index.
- *For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide.
+ *Represents the provisioned throughput settings for the specified global secondary + * index.
+ *For current minimum and maximum provisioned throughput values, see Service, + * Account, and Table Quotas in the Amazon DynamoDB Developer + * Guide.
*/ ProvisionedThroughput?: ProvisionedThroughput; } @@ -1651,13 +1706,13 @@ export namespace CreateGlobalTableInput { export type GlobalTableStatus = "ACTIVE" | "CREATING" | "DELETING" | "UPDATING"; /** - *Replica-specific provisioned throughput settings. If not specified, uses the - * source table's provisioned throughput settings.
+ *Replica-specific provisioned throughput settings. If not specified, uses the source + * table's provisioned throughput settings.
*/ export interface ProvisionedThroughputOverride { /** - *Replica-specific read capacity units. If not specified, uses the source table's - * read capacity settings.
+ *Replica-specific read capacity units. If not specified, uses the source table's read + * capacity settings.
*/ ReadCapacityUnits?: number; } @@ -1715,36 +1770,43 @@ export interface ReplicaDescription { /** *The current state of the replica:
- *
- * CREATING
- The replica is being created.
+ * CREATING
- The replica is being created.
- * UPDATING
- The replica is being updated.
+ * UPDATING
- The replica is being updated.
- * DELETING
- The replica is being deleted.
+ * DELETING
- The replica is being deleted.
- * ACTIVE
- The replica is ready for use.
+ * ACTIVE
- The replica is ready for use.
- * REGION_DISABLED
- The replica is inaccessible because the AWS Region has been disabled.
If the AWS Region remains inaccessible for more than 20 hours, DynamoDB will remove this replica from the replication group. The replica will not be deleted and replication will stop from and to this region.
- *
+ * REGION_DISABLED
- The replica is inaccessible because the Amazon Web Services Region has been disabled.
If the Amazon Web Services Region remains inaccessible for more than 20 + * hours, DynamoDB will remove this replica from the replication + * group. The replica will not be deleted and replication will stop from and to + * this region.
+ *
- * INACCESSIBLE_ENCRYPTION_CREDENTIALS
- The AWS KMS key used to encrypt the table is inaccessible.
If the AWS KMS key remains inaccessible for more than 20 hours, DynamoDB will remove this replica from the replication group. The replica will not be deleted and replication will stop from and to this region.
- *
+ * INACCESSIBLE_ENCRYPTION_CREDENTIALS
- The KMS key
+ * used to encrypt the table is inaccessible.
If the KMS key remains inaccessible for more than 20 hours, + * DynamoDB will remove this replica from the replication group. + * The replica will not be deleted and replication will stop from and to this + * region.
+ *Specifies the progress of a Create, Update, or Delete action on the replica - * as a percentage.
+ *Specifies the progress of a Create, Update, or Delete action on the replica as a + * percentage.
*/ ReplicaStatusPercentProgress?: string; /** - *The AWS KMS customer master key (CMK) of the replica that will be used for AWS KMS - * encryption.
+ *The KMS key of the replica that will be used for + * KMS encryption.
*/ KMSMasterKeyId?: string; /** *Replica-specific provisioned throughput. If not described, uses the source table's - * provisioned throughput settings.
+ * provisioned throughput settings. */ ProvisionedThroughputOverride?: ProvisionedThroughputOverride; @@ -1779,7 +1841,8 @@ export interface ReplicaDescription { GlobalSecondaryIndexes?: ReplicaGlobalSecondaryIndexDescription[]; /** - *The time at which the replica was first detected as inaccessible. To determine cause of inaccessibility check the ReplicaStatus
property.
The time at which the replica was first detected as inaccessible. To determine cause
+ * of inaccessibility check the ReplicaStatus
property.
The current state of the global table:
- *
- * CREATING
- The global table is being created.
+ * CREATING
- The global table is being created.
- * UPDATING
- The global table is being updated.
+ * UPDATING
- The global table is being updated.
- * DELETING
- The global table is being deleted.
+ * DELETING
- The global table is being deleted.
- * ACTIVE
- The global table is ready for use.
+ * ACTIVE
- The global table is ready for use.
Replica table GSI-specific provisioned throughput. If not specified, uses the - * source table GSI's read capacity settings.
+ *Replica table GSI-specific provisioned throughput. If not specified, uses the source + * table GSI's read capacity settings.
*/ ProvisionedThroughputOverride?: ProvisionedThroughputOverride; } @@ -1938,16 +2001,17 @@ export interface CreateReplicationGroupMemberAction { RegionName: string | undefined; /** - *The AWS KMS customer master key (CMK) that should be used for AWS KMS encryption - * in the new replica. To specify a CMK, use its key ID, Amazon Resource Name (ARN), - * alias name, or alias ARN. Note that you should only provide this parameter if the - * key is different from the default DynamoDB KMS master key alias/aws/dynamodb.
+ *The KMS key that should be used for KMS encryption in
+ * the new replica. To specify a key, use its key ID, Amazon Resource Name (ARN), alias
+ * name, or alias ARN. Note that you should only provide this parameter if the key is
+ * different from the default DynamoDB KMS key
+ * alias/aws/dynamodb
.
Replica-specific provisioned throughput. If not specified, uses the source table's - * provisioned throughput settings.
+ * provisioned throughput settings. */ ProvisionedThroughputOverride?: ProvisionedThroughputOverride; @@ -1971,43 +2035,50 @@ export namespace CreateReplicationGroupMemberAction { */ export interface GlobalSecondaryIndex { /** - *The name of the global secondary index. The name must be unique among all other indexes on this table.
@@ -1971,43 +2035,50 @@

/**
 */
export interface GlobalSecondaryIndex {
  /**
- *The name of the global secondary index. The name must be unique among all other indexes on this table.
+ *The name of the global secondary index. The name must be unique among all other
+ * indexes on this table.
 */
  IndexName: string | undefined;

  /**
- *The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types:
+ *The complete key schema for a global secondary index, which consists of one or more
+ * pairs of attribute names and key types:
- * HASH - partition key
+ * HASH - partition key
- * RANGE - sort key
+ * RANGE - sort key
- *The partition key of an item is also known as its hash attribute. The
- * term "hash attribute" derives from DynamoDB's usage of an internal hash function to
- * evenly distribute data items across partitions, based on their partition key values.
+ *The partition key of an item is also known as its hash
+ * attribute. The term "hash attribute" derives from DynamoDB's usage of
+ * an internal hash function to evenly distribute data items across partitions, based
+ * on their partition key values.
 *The sort key of an item is also known as its range attribute.
 * The term "range attribute" derives from the way DynamoDB stores items with the same
 * partition key physically close together, in sorted order by the sort key value.
 *Represents attributes that are copied (projected) from the table into the global
 * secondary index. These are in addition to the primary key attributes and index key
 * attributes, which are automatically projected.
 */
  Projection: Projection | undefined;

  /**
- *Represents the provisioned throughput settings for the specified global secondary index.
- *For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide.
+ *Represents the provisioned throughput settings for the specified global secondary
+ * index.
+ *For current minimum and maximum provisioned throughput values, see Service,
+ * Account, and Table Quotas in the Amazon DynamoDB Developer
+ * Guide.
 */
  ProvisionedThroughput?: ProvisionedThroughput;
}

@@ -2026,37 +2097,41 @@

/**
 */
export interface LocalSecondaryIndex {
  /**
- *The name of the local secondary index. The name must be unique among all other indexes on this table.
+ *The name of the local secondary index. The name must be unique among all other indexes
+ * on this table.
 */
  IndexName: string | undefined;

  /**
- *The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types:
+ *The complete key schema for the local secondary index, consisting of one or more pairs
+ * of attribute names and key types:
- * HASH - partition key
+ * HASH - partition key
- * RANGE - sort key
+ * RANGE - sort key
- *The partition key of an item is also known as its hash attribute. The
- * term "hash attribute" derives from DynamoDB's usage of an internal hash function to
- * evenly distribute data items across partitions, based on their partition key values.
+ *The partition key of an item is also known as its hash
+ * attribute. The term "hash attribute" derives from DynamoDB's usage of
+ * an internal hash function to evenly distribute data items across partitions, based
+ * on their partition key values.
 *The sort key of an item is also known as its range attribute.
 * The term "range attribute" derives from the way DynamoDB stores items with the same
 * partition key physically close together, in sorted order by the sort key value.
 *Represents attributes that are copied (projected) from the table into the local
 * secondary index. These are in addition to the primary key attributes and index key
 * attributes, which are automatically projected.
 */
  Projection: Projection | undefined;
}
- * server-side encryption type is set to KMS
and an AWS managed CMK is used (AWS KMS charges apply). If disabled (false) or not specified, server-side
- * encryption is set to AWS owned CMK.
Indicates whether server-side encryption is done using an Amazon Web Services managed
+ * key or an Amazon Web Services owned key. If enabled (true), server-side encryption type
+ * is set to KMS
and an Amazon Web Services managed key is used (KMS charges apply). If disabled (false) or not specified, server-side
+ * encryption is set to Amazon Web Services owned key.
Server-side encryption type. The only supported value is:
- *
- * KMS
- Server-side encryption that uses AWS Key Management Service. The
- * key is stored in your account and is managed by AWS KMS (AWS KMS charges
- * apply).
+ * KMS
- Server-side encryption that uses Key Management Service. The
+ * key is stored in your account and is managed by KMS (KMS charges apply).
The AWS KMS customer master key (CMK) that should be used for the AWS KMS encryption. To - * specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note - * that you should only provide this parameter if the key is different from the default - * DynamoDB customer master key alias/aws/dynamodb.
+ *The KMS key that should be used for the KMS encryption.
+ * To specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN.
+ * Note that you should only provide this parameter if the key is different from the
+ * default DynamoDB key alias/aws/dynamodb
.
Describes a tag. A tag is a key-value pair. You can add up to 50 tags to a single DynamoDB table. - *
- * AWS-assigned tag names and values are automatically assigned the aws:
- * prefix, which the user cannot assign. AWS-assigned tag names do not count towards the
- * tag limit of 50. User-assigned tag names have the prefix user:
in the Cost
- * Allocation Report. You cannot backdate the application of a tag.
For an overview on tagging DynamoDB resources, see - * Tagging for DynamoDB - * in the Amazon DynamoDB Developer Guide.
+ *Describes a tag. A tag is a key-value pair. You can add up to 50 tags to a single + * DynamoDB table.
+ *Amazon Web Services-assigned tag names and values are automatically assigned the
+ * aws:
prefix, which the user cannot assign. Amazon Web Services-assigned
+ * tag names do not count towards the tag limit of 50. User-assigned tag names have the
+ * prefix user:
in the Cost Allocation Report. You cannot backdate the
+ * application of a tag.
For an overview on tagging DynamoDB resources, see Tagging + * for DynamoDB in the Amazon DynamoDB Developer + * Guide.
*/ export interface Tag { /** - *The key of the tag. Tag keys are case sensitive. Each DynamoDB table can only have up to - * one tag with the same key. If you try to add an existing tag (same key), the existing - * tag value will be updated to the new value.
+ *The key of the tag. Tag keys are case sensitive. Each DynamoDB table can + * only have up to one tag with the same key. If you try to add an existing tag (same key), + * the existing tag value will be updated to the new value.
*/ Key: string | undefined; @@ -2161,105 +2237,109 @@ export interface CreateTableInput { TableName: string | undefined; /** - *Specifies the attributes that make up the primary key for a table or an index. The attributes
- * in KeySchema
must also be defined in the AttributeDefinitions
array. For more
- * information, see Data Model in the
- * Amazon DynamoDB Developer Guide.
Each KeySchemaElement
in the array is composed of:
Specifies the attributes that make up the primary key for a table or an index. The
+ * attributes in KeySchema
must also be defined in the
+ * AttributeDefinitions
array. For more information, see Data
+ * Model in the Amazon DynamoDB Developer Guide.
Each KeySchemaElement
in the array is composed of:
- * AttributeName
- The name of this key attribute.
+ * AttributeName
- The name of this key attribute.
- * KeyType
- The role that the key attribute will assume:
+ * KeyType
- The role that the key attribute will assume:
- * HASH
- partition key
+ * HASH
- partition key
- * RANGE
- sort key
+ * RANGE
- sort key
The partition key of an item is also known as its hash - * attribute. The term "hash attribute" derives from the DynamoDB usage of - * an internal hash function to evenly distribute data items across partitions, based - * on their partition key values.
+ * attribute. The term "hash attribute" derives from the DynamoDB usage + * of an internal hash function to evenly distribute data items across partitions, + * based on their partition key values. *The sort key of an item is also known as its range attribute. - * The term "range attribute" derives from the way DynamoDB stores items with the same - * partition key physically close together, in sorted order by the sort key value.
- *For a simple primary key (partition key), you must provide
- * exactly one element with a KeyType
of HASH
.
For a composite primary key (partition key and sort key), you must provide exactly two
- * elements, in this order: The first element must have a KeyType
of HASH
,
- * and the second element must have a KeyType
of RANGE
.
For more information, see Working with Tables in the Amazon DynamoDB Developer + *
For a simple primary key (partition key), you must provide exactly one element with a
+ * KeyType
of HASH
.
For a composite primary key (partition key and sort key), you must provide exactly two
+ * elements, in this order: The first element must have a KeyType
of
+ * HASH
, and the second element must have a KeyType
of
+ * RANGE
.
For more information, see Working with Tables in the Amazon DynamoDB Developer * Guide.
*/ KeySchema: KeySchemaElement[] | undefined; /** - *One or more local secondary indexes (the maximum is 5) to be created on the table. Each index is scoped to a given partition key value. There is a 10 GB size limit per partition key value; otherwise, the size of a local secondary index is unconstrained.
- *Each local secondary index in the array includes the following:
- *One or more local secondary indexes (the maximum is 5) to be created on the table. + * Each index is scoped to a given partition key value. There is a 10 GB size limit per + * partition key value; otherwise, the size of a local secondary index is + * unconstrained.
+ *Each local secondary index in the array includes the following:
+ *
- * IndexName
- The name of the local secondary index. Must be unique only for this table.
+ * IndexName
- The name of the local secondary index. Must be unique
+ * only for this table.
- * KeySchema
- Specifies the key schema for the local secondary index. The key schema must begin with
- * the same partition key as the table.
+ * KeySchema
- Specifies the key schema for the local secondary index.
+ * The key schema must begin with the same partition key as the table.
- * Projection
- Specifies
- * attributes that are copied (projected) from the table into the index. These are in
- * addition to the primary key attributes and index key
- * attributes, which are automatically projected. Each
- * attribute specification is composed of:
+ * Projection
- Specifies attributes that are copied (projected) from
+ * the table into the index. These are in addition to the primary key attributes
+ * and index key attributes, which are automatically projected. Each attribute
+ * specification is composed of:
- * ProjectionType
- One
- * of the following:
+ * ProjectionType
- One of the following:
- * KEYS_ONLY
- Only the index and primary keys are projected into the
- * index.
+ * KEYS_ONLY
- Only the index and primary keys are
+ * projected into the index.
+ *
* INCLUDE
- Only the specified table attributes are
* projected into the index. The list of projected attributes is in
* NonKeyAttributes
.
- * ALL
- All of the table attributes are projected into the
- * index.
+ * ALL
- All of the table attributes are projected
+ * into the index.
- * NonKeyAttributes
- A list of one or more non-key
- * attribute names that are projected into the secondary index. The total
- * count of attributes provided in NonKeyAttributes
,
- * summed across all of the secondary indexes, must not exceed 100. If you
- * project the same attribute into two different indexes, this counts as
- * two distinct attributes when determining the total.
NonKeyAttributes
- A list of one or more non-key attribute
+ * names that are projected into the secondary index. The total count of
+ * attributes provided in NonKeyAttributes
, summed across all
+ * of the secondary indexes, must not exceed 100. If you project the same
+ * attribute into two different indexes, this counts as two distinct
+ * attributes when determining the total.
+ *
   */
  LocalSecondaryIndexes?: LocalSecondaryIndex[];

  /**
   * One or more global secondary indexes (the maximum is 20) to be created on the table.
   * Each global secondary index in the array includes the following:
   *   - IndexName - The name of the global secondary index. Must be unique only for this
   *     table.
   *   - KeySchema - Specifies the key schema for the global secondary index.
   *   - Projection - Specifies attributes that are copied (projected) from the table into
   *     the index. These are in addition to the primary key attributes and index key
   *     attributes, which are automatically projected. Each attribute specification is
   *     composed of:
   *       - ProjectionType - One of the following:
   *           - KEYS_ONLY - Only the index and primary keys are projected into the index.
   *           - INCLUDE - Only the specified table attributes are projected into the index.
   *             The list of projected attributes is in NonKeyAttributes.
   *           - ALL - All of the table attributes are projected into the index.
   *       - NonKeyAttributes - A list of one or more non-key attribute names that are
   *         projected into the secondary index. The total count of attributes provided in
   *         NonKeyAttributes, summed across all of the secondary indexes, must not exceed
   *         100. If you project the same attribute into two different indexes, this counts
   *         as two distinct attributes when determining the total.
   *   - ProvisionedThroughput - The provisioned throughput settings for the global
   *     secondary index, consisting of read and write capacity units.
   */
  GlobalSecondaryIndexes?: GlobalSecondaryIndex[];

  /**
   * Controls how you are charged for read and write throughput and how you manage
   * capacity. This setting can be changed later.
   *   - PROVISIONED - We recommend using PROVISIONED for predictable workloads.
   *     PROVISIONED sets the billing mode to Provisioned Mode.
   *   - PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads.
   *     PAY_PER_REQUEST sets the billing mode to On-Demand Mode.
   */
  BillingMode?: BillingMode | string;

  /**
   * Represents the provisioned throughput settings for a specified table or index. The
   * settings can be modified using the UpdateTable operation.
   * If you set BillingMode as PROVISIONED, you must specify this property. If you set
   * BillingMode as PAY_PER_REQUEST, you cannot specify this property.
   * For current minimum and maximum provisioned throughput values, see Service, Account,
   * and Table Quotas in the Amazon DynamoDB Developer Guide.
   */
  ProvisionedThroughput?: ProvisionedThroughput;

@@ -2355,38 +2443,38 @@ export interface CreateTableInput {
  /**
   * The settings for DynamoDB Streams on the table. These settings consist of:
   *   - StreamEnabled - Indicates whether DynamoDB Streams is to be enabled (true) or
   *     disabled (false).
   *   - StreamViewType - When an item in the table is modified, StreamViewType determines
   *     what information is written to the table's stream. Valid values for StreamViewType
   *     are:
   *       - KEYS_ONLY - Only the key attributes of the modified item are written to the
   *         stream.
   *       - NEW_IMAGE - The entire item, as it appears after it was modified, is written to
   *         the stream.
   *       - OLD_IMAGE - The entire item, as it appeared before it was modified, is written
   *         to the stream.
   *       - NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are
   *         written to the stream.
   */
  StreamSpecification?: StreamSpecification;

  /**
   * A list of key-value pairs to label the table. For more information, see Tagging for
   * DynamoDB.
   */
  Tags?: Tag[];
}
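Since the hunks above cover essentially every field of `CreateTableInput`, a compact usage sketch may help while reviewing the reflow. It assumes a Node ESM context with top-level await, the `DynamoDBClient`/`CreateTableCommand` exports this package already provides, and hypothetical table and attribute names:

```ts
import { DynamoDBClient, CreateTableCommand } from "@aws-sdk/client-dynamodb";

// Hypothetical table: partition key "pk", sort key "sk", one GSI keyed on "gsi1pk".
const client = new DynamoDBClient({ region: "us-east-1" });

await client.send(
  new CreateTableCommand({
    TableName: "ExampleTable",
    // Every attribute named in any key schema must be defined here.
    AttributeDefinitions: [
      { AttributeName: "pk", AttributeType: "S" },
      { AttributeName: "sk", AttributeType: "S" },
      { AttributeName: "gsi1pk", AttributeType: "S" },
    ],
    // Composite primary key: exactly two elements, HASH first, then RANGE.
    KeySchema: [
      { AttributeName: "pk", KeyType: "HASH" },
      { AttributeName: "sk", KeyType: "RANGE" },
    ],
    GlobalSecondaryIndexes: [
      {
        IndexName: "gsi1",
        KeySchema: [{ AttributeName: "gsi1pk", KeyType: "HASH" }],
        // KEYS_ONLY: only the index and primary keys are projected.
        Projection: { ProjectionType: "KEYS_ONLY" },
        ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 },
      },
    ],
    // PROVISIONED requires ProvisionedThroughput; PAY_PER_REQUEST forbids it.
    BillingMode: "PROVISIONED",
    ProvisionedThroughput: { ReadCapacityUnits: 5, WriteCapacityUnits: 5 },
    StreamSpecification: { StreamEnabled: true, StreamViewType: "NEW_AND_OLD_IMAGES" },
    Tags: [{ Key: "env", Value: "dev" }],
  })
);
```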
@@ -2416,7 +2505,8 @@ export namespace CreateTableInput {

export type IndexStatus = "ACTIVE" | "CREATING" | "DELETING" | "UPDATING";

/**
 * Represents the provisioned throughput settings for the table, consisting of read and
 * write capacity units, along with data about increases and decreases.
 */
export interface ProvisionedThroughputDescription {
@@ -2430,22 +2520,25 @@ export interface ProvisionedThroughputDescription {
  LastDecreaseDateTime?: Date;

  /**
   * The number of provisioned throughput decreases for this table during this UTC calendar
   * day. For current maximums on provisioned throughput decreases, see Service, Account,
   * and Table Quotas in the Amazon DynamoDB Developer Guide.
   */
  NumberOfDecreasesToday?: number;

  /**
   * The maximum number of strongly consistent reads consumed per second before DynamoDB
   * returns a ThrottlingException. Eventually consistent reads require less effort than
   * strongly consistent reads, so a setting of 50 ReadCapacityUnits per second provides
   * 100 eventually consistent ReadCapacityUnits per second.
   */
  ReadCapacityUnits?: number;

  /**
   * The maximum number of writes consumed per second before DynamoDB returns a
   * ThrottlingException.
   */
  WriteCapacityUnits?: number;
}
@@ -2469,86 +2562,98 @@ export interface GlobalSecondaryIndexDescription {
  IndexName?: string;

  /**
   * The complete key schema for a global secondary index, which consists of one or more
   * pairs of attribute names and key types:
   *   - HASH - partition key
   *   - RANGE - sort key
   * The partition key of an item is also known as its hash attribute. The term "hash
   * attribute" derives from DynamoDB's usage of an internal hash function to evenly
   * distribute data items across partitions, based on their partition key values.
   * The sort key of an item is also known as its range attribute. The term "range
   * attribute" derives from the way DynamoDB stores items with the same partition key
   * physically close together, in sorted order by the sort key value.
   */
  KeySchema?: KeySchemaElement[];

  /**
   * Represents attributes that are copied (projected) from the table into the global
   * secondary index. These are in addition to the primary key attributes and index key
   * attributes, which are automatically projected.
   */
  Projection?: Projection;

  /**
   * The current state of the global secondary index:
   *   - CREATING - The index is being created.
   *   - UPDATING - The index is being updated.
   *   - DELETING - The index is being deleted.
   *   - ACTIVE - The index is ready for use.
   */
  IndexStatus?: IndexStatus | string;

  /**
   * Indicates whether the index is currently backfilling. Backfilling is the process of
   * reading items from the table and determining whether they can be added to the index.
   * (Not all items will qualify: For example, a partition key cannot have any duplicate
   * values.) If an item can be added to the index, DynamoDB will do so. After all items
   * have been processed, the backfilling operation is complete and Backfilling is false.
   * You can delete an index that is being created during the Backfilling phase when
   * IndexStatus is set to CREATING and Backfilling is true. You can't delete the index
   * that is being created when IndexStatus is set to CREATING and Backfilling is false.
   * For indexes that were created during a CreateTable operation, the Backfilling
   * attribute does not appear in the DescribeTable output.
   */
  Backfilling?: boolean;

  /**
   * Represents the provisioned throughput settings for the specified global secondary
   * index.
   * For current minimum and maximum provisioned throughput values, see Service, Account,
   * and Table Quotas in the Amazon DynamoDB Developer Guide.
   */
  ProvisionedThroughput?: ProvisionedThroughputDescription;

  /**
   * The total size of the specified index, in bytes. DynamoDB updates this value
   * approximately every six hours. Recent changes might not be reflected in this value.
   */
  IndexSizeBytes?: number;

  /**
   * The number of items in the specified index. DynamoDB updates this value approximately
   * every six hours. Recent changes might not be reflected in this value.
   */
  ItemCount?: number;

@@ -2577,42 +2682,49 @@ export interface LocalSecondaryIndexDescription {
  IndexName?: string;

  /**
   * The complete key schema for the local secondary index, consisting of one or more pairs
   * of attribute names and key types:
   *   - HASH - partition key
   *   - RANGE - sort key
   * The partition key of an item is also known as its hash attribute. The term "hash
   * attribute" derives from DynamoDB's usage of an internal hash function to evenly
   * distribute data items across partitions, based on their partition key values.
   * The sort key of an item is also known as its range attribute. The term "range
   * attribute" derives from the way DynamoDB stores items with the same partition key
   * physically close together, in sorted order by the sort key value.
   */
  KeySchema?: KeySchemaElement[];

  /**
   * Represents attributes that are copied (projected) from the table into the global
   * secondary index. These are in addition to the primary key attributes and index key
   * attributes, which are automatically projected.
   */
  Projection?: Projection;

  /**
   * The total size of the specified index, in bytes. DynamoDB updates this value
   * approximately every six hours. Recent changes might not be reflected in this value.
   */
  IndexSizeBytes?: number;

  /**
   * The number of items in the specified index. DynamoDB updates this value approximately
   * every six hours. Recent changes might not be reflected in this value.
   */
  ItemCount?: number;

@@ -2679,17 +2791,17 @@ export type TableStatus =
 */
export interface TableDescription {
  /**
   * An array of AttributeDefinition objects. Each of these objects describes one attribute
   * in the table and index key schema.
   * Each AttributeDefinition object in this array is composed of:
   *   - AttributeName - The name of the attribute.
   *   - AttributeType - The data type for the attribute.
   */
  AttributeDefinitions?: AttributeDefinition[];

  /**
   * The primary key structure for the table. Each KeySchemaElement consists of:
   *   - AttributeName - The name of the attribute.
   *   - KeyType - The role of the attribute:
   *       - HASH - partition key
   *       - RANGE - sort key
   * The partition key of an item is also known as its hash attribute. The term "hash
   * attribute" derives from DynamoDB's usage of an internal hash function to evenly
   * distribute data items across partitions, based on their partition key values.
   * The sort key of an item is also known as its range attribute. The term "range
   * attribute" derives from the way DynamoDB stores items with the same partition key
   * physically close together, in sorted order by the sort key value.
   * For more information about primary keys, see Primary Key in the Amazon DynamoDB
   * Developer Guide.
- *
- * CREATING
- The table is being created.
+ * CREATING
- The table is being created.
- * UPDATING
- The table is being updated.
+ * UPDATING
- The table is being updated.
- * DELETING
- The table is being deleted.
+ * DELETING
- The table is being deleted.
- * ACTIVE
- The table is ready for use.
+ * ACTIVE
- The table is ready for use.
- * INACCESSIBLE_ENCRYPTION_CREDENTIALS
- The AWS KMS
- * key used to encrypt the table in inaccessible. Table operations
- * may fail due to failure to use the AWS KMS key. DynamoDB will
- * initiate the table archival process when a table's AWS KMS key
- * remains inaccessible for more than seven days.
- *
+ * INACCESSIBLE_ENCRYPTION_CREDENTIALS
- The KMS key
+ * used to encrypt the table in inaccessible. Table operations may fail due to
+ * failure to use the KMS key. DynamoDB will initiate the
+ * table archival process when a table's KMS key remains
+ * inaccessible for more than seven days.
- * ARCHIVING
- The table is being archived. Operations
- * are not allowed until archival is complete.
- *
+ * ARCHIVING
- The table is being archived. Operations are not allowed
+ * until archival is complete.
- * ARCHIVED
- The table has been archived. See the
- * ArchivalReason for more information.
- *
+ * ARCHIVED
- The table has been archived. See the ArchivalReason for
+ * more information.
The provisioned throughput settings for the table, consisting of read and write capacity units, along with data about increases and decreases.
+ *The provisioned throughput settings for the table, consisting of read and write + * capacity units, along with data about increases and decreases.
*/ ProvisionedThroughput?: ProvisionedThroughputDescription; /** - *The total size of the specified table, in bytes. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.
+ *The total size of the specified table, in bytes. DynamoDB updates this value + * approximately every six hours. Recent changes might not be reflected in this + * value.
*/ TableSizeBytes?: number; /** - *The number of items in the specified table. DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value.
+ *The number of items in the specified table. DynamoDB updates this value approximately + * every six hours. Recent changes might not be reflected in this value.
*/ ItemCount?: number; @@ -2816,179 +2932,191 @@ export interface TableDescription { BillingModeSummary?: BillingModeSummary; /** - *Represents one or more local secondary indexes on the table. Each index is scoped to a given partition key value. Tables with one or more local secondary indexes are subject to an item collection size limit, where the amount of data within a given item collection cannot exceed 10 GB. Each element is composed of:
@@ -2816,179 +2932,191 @@ export interface TableDescription {
  BillingModeSummary?: BillingModeSummary;

  /**
   * Represents one or more local secondary indexes on the table. Each index is scoped to a
   * given partition key value. Tables with one or more local secondary indexes are subject
   * to an item collection size limit, where the amount of data within a given item
   * collection cannot exceed 10 GB. Each element is composed of:
   *   - IndexName - The name of the local secondary index.
   *   - KeySchema - Specifies the complete index key schema. The attribute names in the
   *     key schema must be between 1 and 255 characters (inclusive). The key schema must
   *     begin with the same partition key as the table.
   *   - Projection - Specifies attributes that are copied (projected) from the table into
   *     the index. These are in addition to the primary key attributes and index key
   *     attributes, which are automatically projected. Each attribute specification is
   *     composed of:
   *       - ProjectionType - One of the following:
   *           - KEYS_ONLY - Only the index and primary keys are projected into the index.
   *           - INCLUDE - Only the specified table attributes are projected into the index.
   *             The list of projected attributes is in NonKeyAttributes.
   *           - ALL - All of the table attributes are projected into the index.
   *       - NonKeyAttributes - A list of one or more non-key attribute names that are
   *         projected into the secondary index. The total count of attributes provided in
   *         NonKeyAttributes, summed across all of the secondary indexes, must not exceed
   *         20. If you project the same attribute into two different indexes, this counts
   *         as two distinct attributes when determining the total.
   *   - IndexSizeBytes - Represents the total size of the index, in bytes. DynamoDB
   *     updates this value approximately every six hours. Recent changes might not be
   *     reflected in this value.
   *   - ItemCount - Represents the number of items in the index. DynamoDB updates this
   *     value approximately every six hours. Recent changes might not be reflected in this
   *     value.
   * If the table is in the DELETING state, no information about indexes will be returned.
   */
  LocalSecondaryIndexes?: LocalSecondaryIndexDescription[];
  /**
   * The global secondary indexes, if any, on the table. Each index is scoped to a given
   * partition key value. Each element is composed of:
   *   - Backfilling - If true, then the index is currently in the backfilling phase.
   *     Backfilling occurs only when a new global secondary index is added to the table.
   *     It is the process by which DynamoDB populates the new index with data from the
   *     table. (This attribute does not appear for indexes that were created during a
   *     CreateTable operation.)
   *     You can delete an index that is being created during the Backfilling phase when
   *     IndexStatus is set to CREATING and Backfilling is true. You can't delete the index
   *     that is being created when IndexStatus is set to CREATING and Backfilling is
   *     false. (This attribute does not appear for indexes that were created during a
   *     CreateTable operation.)
   *   - IndexName - The name of the global secondary index.
   *   - IndexSizeBytes - The total size of the global secondary index, in bytes. DynamoDB
   *     updates this value approximately every six hours. Recent changes might not be
   *     reflected in this value.
   *   - IndexStatus - The current status of the global secondary index:
   *       - CREATING - The index is being created.
   *       - UPDATING - The index is being updated.
   *       - DELETING - The index is being deleted.
   *       - ACTIVE - The index is ready for use.
   *   - ItemCount - The number of items in the global secondary index. DynamoDB updates
   *     this value approximately every six hours. Recent changes might not be reflected in
   *     this value.
   *   - KeySchema - Specifies the complete index key schema. The attribute names in the
   *     key schema must be between 1 and 255 characters (inclusive). The key schema must
   *     begin with the same partition key as the table.
   *   - Projection - Specifies attributes that are copied (projected) from the table into
   *     the index. These are in addition to the primary key attributes and index key
   *     attributes, which are automatically projected. Each attribute specification is
   *     composed of:
   *       - ProjectionType - One of the following:
   *           - KEYS_ONLY - Only the index and primary keys are projected into the index.
   *           - INCLUDE - In addition to the attributes described in KEYS_ONLY, the
   *             secondary index will include other non-key attributes that you specify.
   *           - ALL - All of the table attributes are projected into the index.
   *       - NonKeyAttributes - A list of one or more non-key attribute names that are
   *         projected into the secondary index. The total count of attributes provided in
   *         NonKeyAttributes, summed across all of the secondary indexes, must not exceed
   *         20. If you project the same attribute into two different indexes, this counts
   *         as two distinct attributes when determining the total.
   *   - ProvisionedThroughput - The provisioned throughput settings for the global
   *     secondary index, consisting of read and write capacity units, along with data
   *     about increases and decreases.
   * If the table is in the DELETING state, no information about indexes will be returned.
   */
  GlobalSecondaryIndexes?: GlobalSecondaryIndexDescription[];
* - *Note that LatestStreamLabel
is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique:
Note that LatestStreamLabel
is not a unique identifier for the stream,
+ * because it is possible that a stream from another table might have the same timestamp.
+ * However, the combination of the following three elements is guaranteed to be
+ * unique:
AWS customer ID
+ *Amazon Web Services customer ID
*Table name
+ *Table name
*+ *
* StreamLabel
*
The Amazon Resource Name (ARN) that uniquely identifies the latest stream for this table.
+ *The Amazon Resource Name (ARN) that uniquely identifies the latest stream for this + * table.
*/ LatestStreamArn?: string; /** - *Represents the version of global tables in use, if the table is replicated across AWS Regions.
+ *Represents the version of global tables + * in use, if the table is replicated across Amazon Web Services Regions.
*/ GlobalTableVersion?: string; @@ -3077,9 +3210,9 @@ export namespace CreateTableOutput { } /** - *The operation conflicts with the resource's availability. For example, you attempted to
- * recreate an existing table, or tried to delete a table currently in the CREATING
- * state.
The operation conflicts with the resource's availability. For example, you
+ * attempted to recreate an existing table, or tried to delete a table currently in the
+ * CREATING
state.
Operation was rejected because there is an ongoing transaction for the item.
+ *Operation was rejected because there is an ongoing transaction for the + * item.
*/ export interface TransactionConflictException extends __SmithyException, $MetadataBearer { name: "TransactionConflictException"; @@ -3280,7 +3414,8 @@ export namespace DescribeBackupOutput { export interface DescribeContinuousBackupsInput { /** - *Name of the table for which the customer wants to check the continuous backups and point in time recovery settings.
+ *Name of the table for which the customer wants to check the continuous backups and + * point in time recovery settings.
*/ TableName: string | undefined; } @@ -3296,7 +3431,8 @@ export namespace DescribeContinuousBackupsInput { export interface DescribeContinuousBackupsOutput { /** - *Represents the continuous backups and point in time recovery settings on the table.
+ *Represents the continuous backups and point in time recovery settings on the + * table.
*/ ContinuousBackupsDescription?: ContinuousBackupsDescription; } @@ -3367,12 +3503,12 @@ export interface DescribeContributorInsightsOutput { IndexName?: string; /** - *List of names of the associated Alpine rules.
+ *List of names of the associated contributor insights rules.
*/ ContributorInsightsRuleList?: string[]; /** - *Current Status contributor insights.
+ *Current status of contributor insights.
*/ ContributorInsightsStatus?: ContributorInsightsStatus | string; @@ -3382,21 +3518,25 @@ export interface DescribeContributorInsightsOutput { LastUpdateDateTime?: Date; /** - *Returns information about the last failure that encountered.
- *The most common exceptions for a FAILED status are:
- *Returns information about the last failure that was encountered.
+ *The most common exceptions for a FAILED status are:
+ *LimitExceededException - Per-account Amazon CloudWatch Contributor Insights rule limit reached. Please disable Contributor Insights for - * other tables/indexes OR disable Contributor Insights rules before retrying.
+ *LimitExceededException - Per-account Amazon CloudWatch Contributor Insights + * rule limit reached. Please disable Contributor Insights for other tables/indexes + * OR disable Contributor Insights rules before retrying.
*AccessDeniedException - Amazon CloudWatch Contributor Insights rules cannot be modified due to insufficient permissions.
+ *AccessDeniedException - Amazon CloudWatch Contributor Insights rules cannot be + * modified due to insufficient permissions.
*AccessDeniedException - Failed to create service-linked role for Contributor Insights due to insufficient permissions.
+ *AccessDeniedException - Failed to create service-linked role for Contributor + * Insights due to insufficient permissions.
*InternalServerError - Failed to create Amazon CloudWatch Contributor Insights rules. Please retry request.
+ *InternalServerError - Failed to create Amazon CloudWatch Contributor Insights + * rules. Please retry request.
*Export can be in one of the following states: IN_PROGRESS, COMPLETED, or FAILED.
  /**
   * Export can be in one of the following states: IN_PROGRESS, COMPLETED, or FAILED.
   */
  ExportStatus?: ExportStatus | string;

@@ -3549,7 +3690,8 @@ export interface ExportDescription {
  S3Bucket?: string;

  /**
-  * The ID of the AWS account that owns the bucket containing the export.
+  * The ID of the Amazon Web Services account that owns the bucket containing the export.
   */
  S3BucketOwner?: string;

@@ -3560,24 +3702,26 @@ export interface ExportDescription {
  S3Prefix?: string;

  /**
   * Type of encryption used on the bucket where export data is stored. Valid values for
   * S3SseAlgorithm are:
   *   - AES256 - server-side encryption with Amazon S3 managed keys
-  *   - KMS - server-side encryption with AWS KMS managed keys
+  *   - KMS - server-side encryption with KMS managed keys
   */
  S3SseAlgorithm?: S3SseAlgorithm | string;

  /**
-  * The ID of the AWS KMS managed key used to encrypt the S3 bucket where export data is
-  * stored (if applicable).
+  * The ID of the KMS managed key used to encrypt the S3 bucket where export data is
+  * stored (if applicable).
   */
  S3SseKmsKeyId?: string;

@@ -3593,7 +3737,7 @@ export interface ExportDescription {
  /**
   * The format of the exported data. Valid values for ExportFormat are DYNAMODB_JSON or
   * ION.
   */
  ExportFormat?: ExportFormat | string;

@@ -3722,52 +3866,55 @@ export namespace DescribeGlobalTableSettingsInput {
 */
export interface ReplicaGlobalSecondaryIndexSettingsDescription {
  /**
   * The name of the global secondary index. The name must be unique among all other
   * indexes on this table.
   */
  IndexName: string | undefined;

  /**
   * The current status of the global secondary index:
   *   - CREATING - The global secondary index is being created.
   *   - UPDATING - The global secondary index is being updated.
   *   - DELETING - The global secondary index is being deleted.
   *   - ACTIVE - The global secondary index is ready for use.
   */
  IndexStatus?: IndexStatus | string;

  /**
   * The maximum number of strongly consistent reads consumed per second before DynamoDB
   * returns a ThrottlingException.
   */
  ProvisionedReadCapacityUnits?: number;

  /**
   * Auto scaling settings for a global secondary index replica's read capacity units.
   */
  ProvisionedReadCapacityAutoScalingSettings?: AutoScalingSettingsDescription;

  /**
   * The maximum number of writes consumed per second before DynamoDB returns a
   * ThrottlingException.
   */
  ProvisionedWriteCapacityUnits?: number;

  /**
   * Auto scaling settings for a global secondary index replica's write capacity units.
   */
  ProvisionedWriteCapacityAutoScalingSettings?: AutoScalingSettingsDescription;
}

@@ -3792,22 +3939,22 @@ export interface ReplicaSettingsDescription {
  /**
   * The current state of the Region:
   *   - CREATING - The Region is being created.
   *   - UPDATING - The Region is being updated.
   *   - DELETING - The Region is being deleted.
   *   - ACTIVE - The Region is ready for use.
   */
  ReplicaStatus?: ReplicaStatus | string;

  /**
   * The maximum number of strongly consistent reads consumed per second before DynamoDB
   * returns a ThrottlingException. For more information, see Specifying Read and Write
   * Requirements in the Amazon DynamoDB Developer Guide.
   */
  ReplicaProvisionedReadCapacityUnits?: number;

  /**
   * The maximum number of writes consumed per second before DynamoDB returns a
   * ThrottlingException. For more information, see Specifying Read and Write Requirements
   * in the Amazon DynamoDB Developer Guide.
   */
  ReplicaProvisionedWriteCapacityUnits?: number;
operation. Has no content.
Represents the input of a DescribeLimits
operation. Has no
+ * content.
The maximum total write capacity units that your account allows you to provision across - * all of your tables in this Region.
+ *The maximum total write capacity units that your account allows you to provision + * across all of your tables in this Region.
*/ AccountMaxWriteCapacityUnits?: number; @@ -4066,22 +4213,22 @@ export interface ReplicaGlobalSecondaryIndexAutoScalingDescription { /** *The current state of the replica global secondary index:
- *
- * CREATING
- The index is being created.
+ * CREATING
- The index is being created.
- * UPDATING
- The index is being updated.
+ * UPDATING
- The index is being updated.
- * DELETING
- The index is being deleted.
+ * DELETING
- The index is being deleted.
- * ACTIVE
- The index is ready for use.
+ * ACTIVE
- The index is ready for use.
Represents the auto scaling settings for a global table or global secondary - * index.
+ * index. */ ProvisionedReadCapacityAutoScalingSettings?: AutoScalingSettingsDescription; /** *Represents the auto scaling settings for a global table or global secondary - * index.
+ * index. */ ProvisionedWriteCapacityAutoScalingSettings?: AutoScalingSettingsDescription; } @@ -4125,34 +4272,34 @@ export interface ReplicaAutoScalingDescription { /** *Represents the auto scaling settings for a global table or global secondary - * index.
+ * index. */ ReplicaProvisionedReadCapacityAutoScalingSettings?: AutoScalingSettingsDescription; /** *Represents the auto scaling settings for a global table or global secondary - * index.
+ * index. */ ReplicaProvisionedWriteCapacityAutoScalingSettings?: AutoScalingSettingsDescription; /** *The current state of the replica:
- *
- * CREATING
- The replica is being created.
+ * CREATING
- The replica is being created.
- * UPDATING
- The replica is being updated.
+ * UPDATING
- The replica is being updated.
- * DELETING
- The replica is being deleted.
+ * DELETING
- The replica is being deleted.
- * ACTIVE
- The replica is ready for use.
+ * ACTIVE
- The replica is ready for use.
The current state of the table:
- *
- * CREATING
- The table is being created.
+ * CREATING
- The table is being created.
- * UPDATING
- The table is being updated.
+ * UPDATING
- The table is being updated.
- * DELETING
- The table is being deleted.
+ * DELETING
- The table is being deleted.
- * ACTIVE
- The table is ready for use.
+ * ACTIVE
- The table is ready for use.
- * There was an attempt to insert an item with the same primary key as an item that already exists in the DynamoDB table. - *
/**
 * There was an attempt to insert an item with the same primary key as an item that
 * already exists in the DynamoDB table.
 */
export interface DuplicateItemException extends __SmithyException, $MetadataBearer {
  name: "DuplicateItemException";
@@ -4331,8 +4477,8 @@ export namespace DuplicateItemException {
}

/**
 * DynamoDB rejected the request because you retried a request with a different payload
 * but with an idempotent token that was already used.
 */
makes the call to
* ExportTableToPointInTimeInput
idempotent, meaning that multiple
* identical calls have the same effect as one single call.
A client token is valid for 8 hours after the first request that uses it is - * completed. After 8 hours, any request with the same client token is treated as a new - * request. Do not resubmit the same request with the same client token for more than 8 - * hours, or the result might not be idempotent.
+ *A client token is valid for 8 hours after the first request that uses it is completed. + * After 8 hours, any request with the same client token is treated as a new request. Do + * not resubmit the same request with the same client token for more than 8 hours, or the + * result might not be idempotent.
*If you submit a request with the same client token but a change in other parameters
* within the 8-hour idempotency window, DynamoDB returns an
- * IdempotentParameterMismatch
exception.
IdempotentParameterMismatch
exception.
*/
ClientToken?: string;
@@ -4417,7 +4563,8 @@ export interface ExportTableToPointInTimeInput {
S3Bucket: string | undefined;
/**
- * The ID of the AWS account that owns the bucket the export will be stored in.
+ *The ID of the Amazon Web Services account that owns the bucket the export will be + * stored in.
*/ S3BucketOwner?: string; @@ -4433,19 +4580,21 @@ export interface ExportTableToPointInTimeInput { *
- * AES256
- server-side encryption with Amazon S3 managed keys
AES256
- server-side encryption with Amazon S3 managed
+ * keys
*
- * KMS
- server-side encryption with AWS KMS managed keys
KMS
- server-side encryption with KMS managed
+ * keys
* The ID of the AWS KMS managed key used to encrypt the S3 bucket where export data will - * be stored (if applicable).
+ *The ID of the KMS managed key used to encrypt the S3 bucket where + * export data will be stored (if applicable).
*/ S3SseKmsKeyId?: string; @@ -4530,12 +4679,14 @@ export interface ListBackupsInput { Limit?: number; /** - *Only backups created after this time are listed. TimeRangeLowerBound
is inclusive.
Only backups created after this time are listed. TimeRangeLowerBound
is
+ * inclusive.
Only backups created before this time are listed. TimeRangeUpperBound
is exclusive.
Only backups created before this time are listed. TimeRangeUpperBound
is
+ * exclusive.
LastEvaluatedBackupArn
is the Amazon Resource Name (ARN) of the backup last
* evaluated when the current page of results was returned, inclusive of the current page
* of results. This value may be specified as the ExclusiveStartBackupArn
of a
- * new ListBackups
operation in order to fetch the next page of results.
+ * new ListBackups
operation in order to fetch the next page of results.
+ *
*/
ExclusiveStartBackupArn?: string;
/**
* The backups from the table specified by BackupType
are listed.
Where BackupType
can be:
Where BackupType
can be:
@@ -4558,8 +4710,7 @@ export interface ListBackupsInput { *
- * SYSTEM
- On-demand backup automatically created by
- * DynamoDB.
SYSTEM
- On-demand backup automatically created by DynamoDB.
* @@ -4586,18 +4737,15 @@ export interface ListBackupsOutput { BackupSummaries?: BackupSummary[]; /** - *
- * The ARN of the backup last evaluated when the current page of results was returned,
- * inclusive of the current page of results. This value may be specified as the
- * ExclusiveStartBackupArn
of a new ListBackups
operation in order to fetch the next page of results.
- *
- * If LastEvaluatedBackupArn
is empty, then the last page of results has been processed and there are no
- * more results to be retrieved.
- *
If LastEvaluatedBackupArn
is not empty, this may or may not indicate that
- * there is more data to be returned. All results are guaranteed to have been returned if
- * and only if no value for LastEvaluatedBackupArn
is returned.
The ARN of the backup last evaluated when the current page of results was returned,
+ * inclusive of the current page of results. This value may be specified as the
+ * ExclusiveStartBackupArn
of a new ListBackups
operation in
+ * order to fetch the next page of results.
If LastEvaluatedBackupArn
is empty, then the last page of results has
+ * been processed and there are no more results to be retrieved.
If LastEvaluatedBackupArn
is not empty, this may or may not indicate
+ * that there is more data to be returned. All results are guaranteed to have been returned
+ * if and only if no value for LastEvaluatedBackupArn
is returned.
Export can be in one of the following states: IN_PROGRESS, COMPLETED, or FAILED.
+ *Export can be in one of the following states: IN_PROGRESS, COMPLETED, or + * FAILED.
*/ ExportStatus?: ExportStatus | string; } @@ -4740,9 +4889,12 @@ export interface ListGlobalTablesInput { ExclusiveStartGlobalTableName?: string; /** - *The maximum number of table names to return, if the parameter is not specified DynamoDB defaults to 100.
- *If the number of global tables DynamoDB finds reaches this limit, it stops the operation and returns the table names collected up to that point,
- * with a table name in the LastEvaluatedGlobalTableName
to apply in a subsequent operation to the ExclusiveStartGlobalTableName
parameter.
The maximum number of table names to return, if the parameter is not specified + * DynamoDB defaults to 100.
+ *If the number of global tables DynamoDB finds reaches this limit, it stops the
+ * operation and returns the table names collected up to that point, with a table name in
+ * the LastEvaluatedGlobalTableName
to apply in a subsequent operation to the
+ * ExclusiveStartGlobalTableName
parameter.
The first table name that this operation will evaluate. Use the value that was returned for
- * LastEvaluatedTableName
in a previous operation, so that you can obtain the next page
- * of results.
The first table name that this operation will evaluate. Use the value that was
+ * returned for LastEvaluatedTableName
in a previous operation, so that you
+ * can obtain the next page of results.
A maximum number of table names to return. If this parameter is not specified, the limit is 100.
+ *A maximum number of table names to return. If this parameter is not specified, the + * limit is 100.
*/ Limit?: number; } @@ -4837,19 +4990,20 @@ export namespace ListTablesInput { */ export interface ListTablesOutput { /** - *The names of the tables associated with the current account at the current endpoint. The maximum size of this array is 100.
- *If LastEvaluatedTableName
also appears in the output, you can use this value as the
- * ExclusiveStartTableName
parameter in a subsequent ListTables
request and
- * obtain the next page of results.
The names of the tables associated with the current account at the current endpoint. + * The maximum size of this array is 100.
+ *If LastEvaluatedTableName
also appears in the output, you can use this
+ * value as the ExclusiveStartTableName
parameter in a subsequent
+ * ListTables
request and obtain the next page of results.
The name of the last table in the current page of results. Use this value as the
- * ExclusiveStartTableName
in a new request to obtain the next page of results, until
- * all the table names are returned.
If you do not receive a LastEvaluatedTableName
value in the response, this means that
- * there are no more table names to be retrieved.
ExclusiveStartTableName
in a new request to obtain the next page of
+ * results, until all the table names are returned.
+ * If you do not receive a LastEvaluatedTableName
value in the response,
+ * this means that there are no more table names to be retrieved.
The Amazon DynamoDB resource with tags to be listed. This value is an Amazon Resource Name (ARN).
export interface ListTagsOfResourceInput {
  /**
   * The Amazon DynamoDB resource with tags to be listed. This value is an Amazon Resource
   * Name (ARN).
   */
  ResourceArn: string | undefined;

  /**
   * An optional string that, if supplied, must be copied from the output of a previous
   * call to ListTagOfResource. When provided in this manner, this API fetches the next
   * page of results.
   */
  NextToken?: string;
}
@@ -4892,8 +5048,8 @@ export interface ListTagsOfResourceOutput {
  Tags?: Tag[];

  /**
   * If this value is returned, there are additional results to be displayed. To retrieve
   * them, call ListTagsOfResource again, with NextToken set to this value.
   */
  NextToken?: string;
}
+ *List of global secondary indexes for the restored table. The indexes provided should + * match existing secondary indexes. You can choose to exclude some or all of the indexes + * at the time of restore.
*/ GlobalSecondaryIndexOverride?: GlobalSecondaryIndex[]; /** - *List of local secondary indexes for the restored table. The indexes - * provided should match existing secondary indexes. You can choose to exclude - * some or all of the indexes at the time of restore.
+ *List of local secondary indexes for the restored table. The indexes provided should + * match existing secondary indexes. You can choose to exclude some or all of the indexes + * at the time of restore.
*/ LocalSecondaryIndexOverride?: LocalSecondaryIndex[]; @@ -4994,7 +5150,8 @@ export namespace TableAlreadyExistsException { } /** - *An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime and LatestRestorableDateTime.
+ *An invalid restore time was specified. RestoreDateTime must be between + * EarliestRestorableDateTime and LatestRestorableDateTime.
*/ export interface InvalidRestoreTimeException extends __SmithyException, $MetadataBearer { name: "InvalidRestoreTimeException"; @@ -5013,8 +5170,8 @@ export namespace InvalidRestoreTimeException { export interface RestoreTableToPointInTimeInput { /** - *The DynamoDB table that will be restored. This value is an Amazon - * Resource Name (ARN).
+ *The DynamoDB table that will be restored. This value is an Amazon Resource Name + * (ARN).
*/ SourceTableArn?: string; @@ -5030,7 +5187,7 @@ export interface RestoreTableToPointInTimeInput { /** *Restore the table to the latest possible time. LatestRestorableDateTime
- * is typically 5 minutes before the current time.
List of global secondary indexes for the restored table. The indexes - * provided should match existing secondary indexes. You can choose to exclude - * some or all of the indexes at the time of restore.
+ *List of global secondary indexes for the restored table. The indexes provided should + * match existing secondary indexes. You can choose to exclude some or all of the indexes + * at the time of restore.
*/ GlobalSecondaryIndexOverride?: GlobalSecondaryIndex[]; /** - *List of local secondary indexes for the restored table. The indexes - * provided should match existing secondary indexes. You can choose to exclude - * some or all of the indexes at the time of restore.
+ *List of local secondary indexes for the restored table. The indexes provided should + * match existing secondary indexes. You can choose to exclude some or all of the indexes + * at the time of restore.
*/ LocalSecondaryIndexOverride?: LocalSecondaryIndex[]; @@ -5096,7 +5253,8 @@ export namespace RestoreTableToPointInTimeOutput { export interface TagResourceInput { /** - *Identifies the Amazon DynamoDB resource to which tags should be added. This value is an Amazon Resource Name (ARN).
+ *Identifies the Amazon DynamoDB resource to which tags should be added. This value is + * an Amazon Resource Name (ARN).
*/ ResourceArn: string | undefined; @@ -5143,7 +5301,8 @@ export namespace UntagResourceInput { */ export interface PointInTimeRecoverySpecification { /** - *Indicates whether point in time recovery is enabled (true) or disabled (false) on the table.
+ *Indicates whether point in time recovery is enabled (true) or disabled (false) on the + * table.
*/ PointInTimeRecoveryEnabled: boolean | undefined; } @@ -5180,7 +5339,8 @@ export namespace UpdateContinuousBackupsInput { export interface UpdateContinuousBackupsOutput { /** - *Represents the continuous backups and point in time recovery settings on the table.
+ *Represents the continuous backups and point in time recovery settings on the + * table.
*/ ContinuousBackupsDescription?: ContinuousBackupsDescription; } @@ -5284,15 +5444,15 @@ export namespace ReplicaNotFoundException { /** *Represents one of the following:
- *A new replica to be added to an existing global table.
+ *A new replica to be added to an existing global table.
- *New parameters for an existing replica.
+ *New parameters for an existing replica.
- *An existing replica to be removed from an existing global table.
+ *An existing replica to be removed from an existing global table.
- *Represents the settings of a global secondary index for a global table that will be modified.
+ *Represents the settings of a global secondary index for a global table that will be + * modified.
*/ export interface GlobalTableGlobalSecondaryIndexSettingsUpdate { /** - *The name of the global secondary index. The name must be unique among all other indexes on this table.
+ *The name of the global secondary index. The name must be unique among all other + * indexes on this table.
*/ IndexName: string | undefined; /** - *The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException.
+ *
The maximum number of writes consumed per second before DynamoDB returns a
+ * ThrottlingException.
*
Auto scaling settings for managing a global secondary index's write capacity - * units.
+ * units. */ ProvisionedWriteCapacityAutoScalingSettingsUpdate?: AutoScalingSettingsUpdate; } @@ -5404,22 +5567,25 @@ export namespace GlobalTableGlobalSecondaryIndexSettingsUpdate { } /** - *Represents the settings of a global secondary index for a global table that will be modified.
+ *Represents the settings of a global secondary index for a global table that will be + * modified.
*/ export interface ReplicaGlobalSecondaryIndexSettingsUpdate { /** - *The name of the global secondary index. The name must be unique among all other indexes on this table.
+ *The name of the global secondary index. The name must be unique among all other + * indexes on this table.
*/ IndexName: string | undefined; /** - *The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException
.
The maximum number of strongly consistent reads consumed per second before DynamoDB
+ * returns a ThrottlingException
.
Auto scaling settings for managing a global secondary index replica's read capacity - * units.
+ * units. */ ProvisionedReadCapacityAutoScalingSettingsUpdate?: AutoScalingSettingsUpdate; } @@ -5443,20 +5609,21 @@ export interface ReplicaSettingsUpdate { RegionName: string | undefined; /** - *The maximum number of strongly consistent reads consumed per second before DynamoDB returns a ThrottlingException
.
- * For more information, see Specifying Read and Write
- * Requirements in the Amazon DynamoDB Developer Guide.
- *
+ *The maximum number of strongly consistent reads consumed per second before DynamoDB
+ * returns a ThrottlingException. For more information, see Specifying Read and Write Requirements in the Amazon DynamoDB
+ * Developer Guide.
Auto scaling settings for managing a global table replica's read capacity units.
+ *Auto scaling settings for managing a global table replica's read capacity + * units.
*/ ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate?: AutoScalingSettingsUpdate; /** - *Represents the settings of a global secondary index for a global table that will be modified.
+ *Represents the settings of a global secondary index for a global table that will be + * modified.
*/ ReplicaGlobalSecondaryIndexSettingsUpdate?: ReplicaGlobalSecondaryIndexSettingsUpdate[]; } @@ -5477,23 +5644,28 @@ export interface UpdateGlobalTableSettingsInput { GlobalTableName: string | undefined; /** - *The billing mode of the global table. If GlobalTableBillingMode
is not specified, the global table defaults to PROVISIONED
capacity billing mode.
The billing mode of the global table. If GlobalTableBillingMode
is not
+ * specified, the global table defaults to PROVISIONED
capacity billing
+ * mode.
- * PROVISIONED
- We recommend using PROVISIONED
for predictable workloads. PROVISIONED
sets the billing mode to Provisioned Mode.
+ * PROVISIONED
- We recommend using PROVISIONED
for
+ * predictable workloads. PROVISIONED
sets the billing mode to Provisioned Mode.
- * PAY_PER_REQUEST
- We recommend using PAY_PER_REQUEST
for unpredictable workloads. PAY_PER_REQUEST
sets the billing mode to On-Demand Mode.
- *
+ * PAY_PER_REQUEST
- We recommend using PAY_PER_REQUEST
+ * for unpredictable workloads. PAY_PER_REQUEST
sets the billing mode
+ * to On-Demand Mode.
The maximum number of writes consumed per second before DynamoDB returns a ThrottlingException.
+ *
The maximum number of writes consumed per second before DynamoDB returns a
+ * ThrottlingException.
*
Represents the settings of a global secondary index for a global table that will be modified.
+ *Represents the settings of a global secondary index for a global table that will be + * modified.
*/ GlobalTableGlobalSecondaryIndexSettingsUpdate?: GlobalTableGlobalSecondaryIndexSettingsUpdate[]; @@ -5546,7 +5719,8 @@ export namespace UpdateGlobalTableSettingsOutput { } /** - *Represents the new provisioned throughput settings to be applied to a global secondary index.
+ *Represents the new provisioned throughput settings to be applied to a global secondary + * index.
*/ export interface UpdateGlobalSecondaryIndexAction { /** @@ -5555,8 +5729,11 @@ export interface UpdateGlobalSecondaryIndexAction { IndexName: string | undefined; /** - *Represents the provisioned throughput settings for the specified global secondary index.
- *For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide.
+ *Represents the provisioned throughput settings for the specified global secondary + * index.
+ *For current minimum and maximum provisioned throughput values, see Service, + * Account, and Table Quotas in the Amazon DynamoDB Developer + * Guide.
*/ ProvisionedThroughput: ProvisionedThroughput | undefined; } @@ -5572,51 +5749,55 @@ export namespace UpdateGlobalSecondaryIndexAction { /** *Represents one of the following:
- *A new global secondary index to be added to an existing table.
+ *A new global secondary index to be added to an existing table.
*New provisioned throughput parameters for an existing global secondary index.
+ *New provisioned throughput parameters for an existing global secondary + * index.
*An existing global secondary index to be removed from an existing table.
+ *An existing global secondary index to be removed from an existing + * table.
*The name of an existing global secondary index, along with new provisioned throughput settings to be applied to that index.
+ *The name of an existing global secondary index, along with new provisioned throughput + * settings to be applied to that index.
*/ Update?: UpdateGlobalSecondaryIndexAction; /** - *The parameters required for creating a global secondary index on an existing table:
- *The parameters required for creating a global secondary index on an existing + * table:
+ *
- * IndexName
- *
+ * IndexName
+ *
- * KeySchema
- *
+ * KeySchema
+ *
- * AttributeDefinitions
- *
+ * AttributeDefinitions
+ *
- * Projection
- *
+ * Projection
+ *
- * ProvisionedThroughput
- *
+ * ProvisionedThroughput
+ *
The AWS KMS customer master key (CMK) of the replica that should be used for AWS KMS encryption. - * To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias - * ARN. Note that you should only provide this parameter if the key is different from - * the default DynamoDB KMS master key alias/aws/dynamodb.
+ *The KMS key of the replica that should be used
+ * for KMS encryption. To specify a key, use its key ID, Amazon Resource
+ * Name (ARN), alias name, or alias ARN. Note that you should only provide this parameter
+ * if the key is different from the default DynamoDB KMS key
+ * alias/aws/dynamodb
.
Replica-specific provisioned throughput. If not specified, uses the source table's - * provisioned throughput settings.
+ * provisioned throughput settings. */ ProvisionedThroughputOverride?: ProvisionedThroughputOverride; @@ -5677,20 +5859,20 @@ export namespace UpdateReplicationGroupMemberAction { /** *Represents one of the following:
- *A new replica to be added to an existing regional table or global table. This
- * request invokes the CreateTableReplica
action in the destination
- * Region.
A new replica to be added to an existing regional table or global table. This
+ * request invokes the CreateTableReplica
action in the destination
+ * Region.
New parameters for an existing replica. This request invokes the
- * UpdateTable
action in the destination Region.
New parameters for an existing replica. This request invokes the
+ * UpdateTable
action in the destination Region.
An existing replica to be deleted. The request invokes the
- * DeleteTableReplica
action in the destination Region, deleting the
- * replica and all if its items in the destination Region.
An existing replica to be deleted. The request invokes the
+ * DeleteTableReplica
action in the destination Region, deleting
+ * the replica and all if its items in the destination Region.
An array of attributes that describe the key schema for the table and indexes. If you are adding a new global secondary index to the table, AttributeDefinitions
must include the key element(s) of the new index.
An array of attributes that describe the key schema for the table and indexes. If you
+ * are adding a new global secondary index to the table, AttributeDefinitions
+ * must include the key element(s) of the new index.
Controls how you are charged for read and write throughput and how you manage capacity. - * When switching from pay-per-request to provisioned capacity, initial provisioned capacity values must be set. The initial - * provisioned capacity values are estimated based on the consumed read and write capacity of your table and global secondary indexes - * over the past 30 minutes.
- *Controls how you are charged for read and write throughput and how you manage + * capacity. When switching from pay-per-request to provisioned capacity, initial + * provisioned capacity values must be set. The initial provisioned capacity values are + * estimated based on the consumed read and write capacity of your table and global + * secondary indexes over the past 30 minutes.
+ *
- * PROVISIONED
- We recommend using PROVISIONED
for predictable workloads. PROVISIONED
sets the billing mode to Provisioned Mode.
+ * PROVISIONED
- We recommend using PROVISIONED
for
+ * predictable workloads. PROVISIONED
sets the billing mode to Provisioned Mode.
- * PAY_PER_REQUEST
- We recommend using PAY_PER_REQUEST
for unpredictable workloads. PAY_PER_REQUEST
sets the billing mode to On-Demand Mode.
- *
+ * PAY_PER_REQUEST
- We recommend using PAY_PER_REQUEST
+ * for unpredictable workloads. PAY_PER_REQUEST
sets the billing mode
+ * to On-Demand Mode.
An array of one or more global secondary indexes for the table. For each index in the array, you can request one action:
- *An array of one or more global secondary indexes for the table. For each index in the + * array, you can request one action:
+ *
- * Create
- add a new global secondary index to the table.
+ * Create
- add a new global secondary index to the table.
- * Update
- modify the provisioned throughput settings of an existing global secondary index.
+ * Update
- modify the provisioned throughput settings of an existing
+ * global secondary index.
- * Delete
- remove a global secondary index from the table.
+ * Delete
- remove a global secondary index from the table.
You can create or delete only one global secondary index per UpdateTable
operation.
For more information, see Managing Global Secondary - * Indexes in the Amazon DynamoDB Developer Guide.
+ *You can create or delete only one global secondary index per UpdateTable
+ * operation.
For more information, see Managing Global + * Secondary Indexes in the Amazon DynamoDB Developer + * Guide.
*/ GlobalSecondaryIndexUpdates?: GlobalSecondaryIndexUpdate[]; /** *Represents the DynamoDB Streams configuration for the table.
- *You receive a You receive a ResourceInUseException
if you try to enable a stream on a
- * table that already has a stream, or if you try to disable a stream on a table that
+ * ResourceInUseException
if you try to enable a stream on
+ * a table that already has a stream, or if you try to disable a stream on a table that
* doesn't have a stream.
A list of replica update actions (create, delete, or update) for the table.
- *This property only applies to Version 2019.11.21 of global tables.
- *This property only applies to Version + * 2019.11.21 of global tables.
+ *Represents the auto scaling settings of a global secondary index for a global table - * that will be modified.
+ * that will be modified. */ export interface GlobalSecondaryIndexAutoScalingUpdate { /** @@ -5844,7 +6036,7 @@ export interface GlobalSecondaryIndexAutoScalingUpdate { /** *Represents the auto scaling settings to be modified for a global table or global - * secondary index.
+ * secondary index. */ ProvisionedWriteCapacityAutoScalingUpdate?: AutoScalingSettingsUpdate; } @@ -5859,8 +6051,8 @@ export namespace GlobalSecondaryIndexAutoScalingUpdate { } /** - *Represents the auto scaling settings of a global secondary index for a replica - * that will be modified.
+ *Represents the auto scaling settings of a global secondary index for a replica that + * will be modified.
*/ export interface ReplicaGlobalSecondaryIndexAutoScalingUpdate { /** @@ -5870,7 +6062,7 @@ export interface ReplicaGlobalSecondaryIndexAutoScalingUpdate { /** *Represents the auto scaling settings to be modified for a global table or global - * secondary index.
+ * secondary index. */ ProvisionedReadCapacityAutoScalingUpdate?: AutoScalingSettingsUpdate; } @@ -5894,14 +6086,14 @@ export interface ReplicaAutoScalingUpdate { RegionName: string | undefined; /** - *Represents the auto scaling settings of global secondary indexes that will - * be modified.
+ *Represents the auto scaling settings of global secondary indexes that will be + * modified.
*/ ReplicaGlobalSecondaryIndexUpdates?: ReplicaGlobalSecondaryIndexAutoScalingUpdate[]; /** *Represents the auto scaling settings to be modified for a global table or global - * secondary index.
+ * secondary index. */ ReplicaProvisionedReadCapacityAutoScalingUpdate?: AutoScalingSettingsUpdate; } @@ -5917,8 +6109,8 @@ export namespace ReplicaAutoScalingUpdate { export interface UpdateTableReplicaAutoScalingInput { /** - *Represents the auto scaling settings of the global secondary indexes of the replica - * to be updated.
+ *Represents the auto scaling settings of the global secondary indexes of the replica to + * be updated.
*/ GlobalSecondaryIndexUpdates?: GlobalSecondaryIndexAutoScalingUpdate[]; @@ -5929,13 +6121,13 @@ export interface UpdateTableReplicaAutoScalingInput { /** *Represents the auto scaling settings to be modified for a global table or global - * secondary index.
+ * secondary index. */ ProvisionedWriteCapacityAutoScalingUpdate?: AutoScalingSettingsUpdate; /** *Represents the auto scaling settings of replicas of the table that will be - * modified.
+ * modified. */ ReplicaUpdates?: ReplicaAutoScalingUpdate[]; } @@ -6001,7 +6193,8 @@ export interface UpdateTimeToLiveInput { TableName: string | undefined; /** - *Represents the settings used to enable or disable Time to Live for the specified table.
+ *Represents the settings used to enable or disable Time to Live for the specified + * table.
*/ TimeToLiveSpecification: TimeToLiveSpecification | undefined; } @@ -6033,9 +6226,10 @@ export namespace UpdateTimeToLiveOutput { /** *Represents the data for an attribute.
- *Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
- *For more information, see Data Types in the - * Amazon DynamoDB Developer Guide.
+ *Each attribute value is described as a name-value pair. The name is the data type, and + * the value is the data itself.
+ *For more information, see Data Types in the Amazon DynamoDB Developer + * Guide.
*/ export type AttributeValue = | AttributeValue.BMember @@ -6052,8 +6246,8 @@ export type AttributeValue = export namespace AttributeValue { /** - *An attribute of type String. For example:
- *+ *
An attribute of type String. For example:
+ *
* "S": "Hello"
*
An attribute of type Number. For example:
- *+ *
An attribute of type Number. For example:
+ *
* "N": "123.45"
*
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
+ *Numbers are sent across the network to DynamoDB as strings, to maximize compatibility + * across languages and libraries. However, DynamoDB treats them as number type attributes + * for mathematical operations.
*/ export interface NMember { S?: never; @@ -6093,8 +6289,8 @@ export namespace AttributeValue { } /** - *An attribute of type Binary. For example:
- *+ *
An attribute of type Binary. For example:
+ *
* "B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk"
*
An attribute of type String Set. For example:
- *+ *
An attribute of type String Set. For example:
+ *
* "SS": ["Giraffe", "Hippo" ,"Zebra"]
*
An attribute of type Number Set. For example:
- *+ *
An attribute of type Number Set. For example:
+ *
* "NS": ["42.2", "-19", "7.5", "3.14"]
*
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
+ *Numbers are sent across the network to DynamoDB as strings, to maximize compatibility + * across languages and libraries. However, DynamoDB treats them as number type attributes + * for mathematical operations.
*/ export interface NSMember { S?: never; @@ -6154,8 +6352,8 @@ export namespace AttributeValue { } /** - *An attribute of type Binary Set. For example:
- *+ *
An attribute of type Binary Set. For example:
+ *
* "BS": ["U3Vubnk=", "UmFpbnk=", "U25vd3k="]
*
An attribute of type Map. For example:
- *+ *
An attribute of type Map. For example:
+ *
* "M": {"Name": {"S": "Joe"}, "Age": {"N": "35"}}
*
An attribute of type List. For example:
- *+ *
An attribute of type List. For example:
+ *
* "L": [ {"S": "Cookies"} , {"S": "Coffee"}, {"N", "3.14159"}]
*
An attribute of type Null. For example:
- *+ *
An attribute of type Null. For example:
+ *
* "NULL": true
*
An attribute of type Boolean. For example:
- *+ *
An attribute of type Boolean. For example:
+ *
* "BOOL": true
*
For the UpdateItem
operation, represents the attributes to be modified, the action to
- * perform on each, and the new value for each.
You cannot use UpdateItem
to update any primary key attributes. Instead, you will
- * need to delete the item, and then use PutItem
to create a new item with new
- * attributes.
Attribute values cannot be null; string and binary type attributes must have lengths greater
- * than zero; and set type attributes must not be empty. Requests with empty values will be
- * rejected with a ValidationException
exception.
For the UpdateItem
operation, represents the attributes to be modified,
+ * the action to perform on each, and the new value for each.
You cannot use UpdateItem
to update any primary key attributes.
+ * Instead, you will need to delete the item, and then use PutItem
to
+ * create a new item with new attributes.
Attribute values cannot be null; string and binary type attributes must have lengths
+ * greater than zero; and set type attributes must not be empty. Requests with empty values
+ * will be rejected with a ValidationException
exception.
Represents the data for an attribute.
- *Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
- *For more information, see Data Types in the Amazon DynamoDB Developer Guide. - *
+ *Each attribute value is described as a name-value pair. The name is the data type, and + * the value is the data itself.
+ *For more information, see Data Types in the Amazon DynamoDB Developer Guide. + *
*/ Value?: AttributeValue; /** - *Specifies how to perform the update. Valid values are PUT
(default), DELETE
,
- * and ADD
. The behavior depends on whether the specified primary key already exists
- * in the table.
Specifies how to perform the update. Valid values are PUT
(default),
+ * DELETE
, and ADD
. The behavior depends on whether the
+ * specified primary key already exists in the table.
- * If an item with the specified Key is found in the table: - *
+ *+ * If an item with the specified Key is found in + * the table: + *
* - *
- * PUT
- Adds the specified attribute to the item. If the attribute already
- * exists, it is replaced by the new value.
+ * PUT
- Adds the specified attribute to the item. If the attribute
+ * already exists, it is replaced by the new value.
- * DELETE
- If no value is specified, the attribute and its value are removed
- * from the item. The data type of the specified value must match the existing value's data
- * type.
If a set of values is specified, then those values are subtracted from the old
- * set. For example, if the attribute value was the set [a,b,c]
and the
- * DELETE
action specified [a,c]
, then the final attribute value would
- * be [b]
. Specifying an empty set is an error.
+ * DELETE
- If no value is specified, the attribute and its value are
+ * removed from the item. The data type of the specified value must match the
+ * existing value's data type.
If a set of values is specified, then those values are
+ * subtracted from the old set. For example, if the attribute value was the set
+ * [a,b,c]
and the DELETE
action specified
+ * [a,c]
, then the final attribute value would be
+ * [b]
. Specifying an empty set is an error.
- * ADD
- If the attribute does not already exist, then the attribute and its
- * values are added to the item. If the attribute does exist, then the behavior of
- * ADD
depends on the data type of the attribute:
+ * ADD
- If the attribute does not already exist, then the attribute
+ * and its values are added to the item. If the attribute does exist, then the
+ * behavior of ADD
depends on the data type of the attribute:
If the existing attribute is a number, and if Value
is also a number, then the
- * Value
is mathematically added to the existing attribute. If Value
is a
- * negative number, then it is subtracted from the existing attribute.
If you use ADD
to increment or decrement a number value for an item
- * that doesn't exist before the update, DynamoDB uses 0 as the initial value.
In addition, if you use ADD
to update an existing item, and intend to
- * increment or decrement an attribute value which does not yet exist, DynamoDB uses
- * 0
as the initial value. For example, suppose that the item you want
- * to update does not yet have an attribute named itemcount, but you decide to
- * ADD
the number 3
to this attribute anyway, even though
- * it currently does not exist. DynamoDB will create the itemcount attribute, set
- * its initial value to 0
, and finally add 3
to it. The
- * result will be a new itemcount attribute in the item, with a value of
- * 3
.
If the existing attribute is a number, and if Value
is
+ * also a number, then the Value
is mathematically added to
+ * the existing attribute. If Value
is a negative number, then
+ * it is subtracted from the existing attribute.
If you use ADD
to increment or decrement a number
+ * value for an item that doesn't exist before the update, DynamoDB
+ * uses 0 as the initial value.
In addition, if you use ADD
to update an existing
+ * item, and intend to increment or decrement an attribute value which
+ * does not yet exist, DynamoDB uses 0
as the initial
+ * value. For example, suppose that the item you want to update does
+ * not yet have an attribute named itemcount, but
+ * you decide to ADD
the number 3
to this
+ * attribute anyway, even though it currently does not exist. DynamoDB
+ * will create the itemcount attribute, set its
+ * initial value to 0
, and finally add 3
to
+ * it. The result will be a new itemcount
+ * attribute in the item, with a value of 3
.
If the existing data type is a set, and if the Value
is also a set, then the
- * Value
is added to the existing set. (This is a set operation, not
- * mathematical addition.) For example, if the attribute value was the set
- * [1,2]
, and the ADD
action specified [3]
, then
- * the final attribute value would be [1,2,3]
. An error occurs if an Add
- * action is specified for a set attribute and the attribute type specified does not
- * match the existing set type.
Both sets must have the same primitive data type. For example, if the existing data
- * type is a set of strings, the Value
must also be a set of strings. The same
- * holds true for number sets and binary sets.
If the existing data type is a set, and if the Value
is
+ * also a set, then the Value
is added to the existing set.
+ * (This is a set operation, not mathematical
+ * addition.) For example, if the attribute value was the set
+ * [1,2]
, and the ADD
action specified
+ * [3]
, then the final attribute value would be
+ * [1,2,3]
. An error occurs if an Add action is specified
+ * for a set attribute and the attribute type specified does not match the
+ * existing set type.
Both sets must have the same primitive data type. For example, if the
+ * existing data type is a set of strings, the Value
must also
+ * be a set of strings. The same holds true for number sets and binary
+ * sets.
This action is only valid for an existing attribute whose data type is number or is a
- * set. Do not use ADD
for any other data types.
This action is only valid for an existing attribute whose data type is number
+ * or is a set. Do not use ADD
for any other data types.
- * If no item with the specified Key is found: - *
+ *+ * If no item with the specified Key is + * found: + *
* - *
- * PUT
- DynamoDB creates a new item with the specified primary key, and then adds
- * the attribute.
+ * PUT
- DynamoDB creates a new item with the specified primary key,
+ * and then adds the attribute.
- * DELETE
- Nothing happens; there is no attribute to delete.
+ * DELETE
- Nothing happens; there is no attribute to delete.
- * ADD
- DynamoDB creates an item with the supplied primary key and number (or set
- * of numbers) for the attribute value. The only data types allowed are number and number
- * set; no other data types can be specified.
+ * ADD
- DynamoDB creates an item with the supplied primary key and
+ * number (or set of numbers) for the attribute value. The only data types allowed
+ * are number and number set; no other data types can be specified.
- * A PartiQL batch statement request. - *
+ *A PartiQL batch statement request.
*/ export interface BatchStatementRequest { /** - *- * A valid PartiQL statement. - *
+ *A valid PartiQL statement.
*/ Statement: string | undefined; /** - *- * The parameters associated with a PartiQL statement in the batch request. - *
+ *The parameters associated with a PartiQL statement in the batch request.
*/ Parameters?: AttributeValue[]; /** - *- * The read consistency of the PartiQL batch request. - *
+ *The read consistency of the PartiQL batch request.
*/ ConsistentRead?: boolean; } @@ -6484,29 +6685,21 @@ export namespace BatchStatementRequest { } /** - *- * A PartiQL batch statement response.. - *
+ *A PartiQL batch statement response..
*/ export interface BatchStatementResponse { /** - *- * The error associated with a failed PartiQL batch statement. - *
+ *The error associated with a failed PartiQL batch statement.
*/ Error?: BatchStatementError; /** - *- * The table name associated with a failed PartiQL batch statement. - *
+ *The table name associated with a failed PartiQL batch statement.
*/ TableName?: string; /** - *- * A DynamoDB item associated with a BatchStatementResponse - *
+ *A DynamoDB item associated with a BatchStatementResponse
*/ Item?: { [key: string]: AttributeValue }; } @@ -6530,11 +6723,10 @@ export namespace BatchStatementResponse { } /** - *An ordered list of errors for each item in the request which caused the transaction
- * to get cancelled. The values of the list are ordered according to the ordering of the
- * TransactWriteItems
request parameter. If no error
- * occurred for the associated item an error with a Null code and Null message will be present.
- *
An ordered list of errors for each item in the request which caused the transaction to
+ * get cancelled. The values of the list are ordered according to the ordering of the
+ * TransactWriteItems
request parameter. If no error occurred for the
+ * associated item an error with a Null code and Null message will be present.
Represents the selection criteria for a Query
or Scan
operation:
Represents the selection criteria for a Query
or Scan
+ * operation:
For a Query
operation, Condition
is used for specifying the
- * KeyConditions
to use when querying a table or an index. For KeyConditions
,
- * only the following comparison operators are supported:
- * EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN
- *
- * Condition
is also used in a QueryFilter
, which evaluates the query results
- * and returns only the desired values.
For a Query
operation, Condition
is used for
+ * specifying the KeyConditions
to use when querying a table or an
+ * index. For KeyConditions
, only the following comparison operators
+ * are supported:
+ * EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN
+ *
+ * Condition
is also used in a QueryFilter
, which
+ * evaluates the query results and returns only the desired values.
For a Scan
operation, Condition
is used in a ScanFilter
, which
- * evaluates the scan results and returns only the desired values.
For a Scan
operation, Condition
is used in a
+ * ScanFilter
, which evaluates the scan results and returns only
+ * the desired values.
One or more values to evaluate against the supplied attribute. The number of values in the
- * list depends on the ComparisonOperator
being used.
For type Number, value comparisons are numeric.
- *String value comparisons for greater than, equals, or less than are based on ASCII character
- * code values. For example, a
is greater than A
, and a
- * is greater than B
. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.
For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.
+ *One or more values to evaluate against the supplied attribute. The number of values in
+ * the list depends on the ComparisonOperator
being used.
For type Number, value comparisons are numeric.
+ *String value comparisons for greater than, equals, or less than are based on ASCII
+ * character code values. For example, a
is greater than A
, and
+ * a
is greater than B
. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.
For Binary, DynamoDB treats each byte of the binary data as unsigned when it + * compares binary values.
*/ AttributeValueList?: AttributeValue[]; /** - *A comparator for evaluating attributes. For example, equals, greater than, less than, etc.
- *The following comparison operators are available:
- *
- * EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN
- *
The following are descriptions of each comparison operator.
- *
- * EQ
: Equal. EQ
is supported for all data types, including lists and maps.
- * AttributeValueList
can contain only one AttributeValue
element of type String,
- * Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue
element of a different
- * type than the one provided in the request, the value does not match. For example,
- * {"S":"6"}
does not equal {"N":"6"}
. Also,
- * {"N":"6"}
does not equal {"NS":["6", "2", "1"]}
.
- * NE
: Not equal. NE
is supported for all data types, including lists and maps.
- * AttributeValueList
can contain only one AttributeValue
of type String,
- * Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue
of a different
- * type than the one provided in the request, the value does not match. For example,
- * {"S":"6"}
does not equal {"N":"6"}
. Also,
- * {"N":"6"}
does not equal {"NS":["6", "2", "1"]}
.
- * LE
: Less than or equal.
- * AttributeValueList
can contain only one AttributeValue
element of type String,
- * Number, or Binary (not a set type). If an item contains an AttributeValue
element of a different
- * type than the one provided in the request, the value does not match. For example,
- * {"S":"6"}
does not equal {"N":"6"}
. Also,
- * {"N":"6"}
does not compare to {"NS":["6", "2", "1"]}
.
- * LT
: Less than.
- * AttributeValueList
can contain only one AttributeValue
of type String,
- * Number, or Binary (not a set type). If an item contains an AttributeValue
element of a different
- * type than the one provided in the request, the value does not match. For example,
- * {"S":"6"}
does not equal {"N":"6"}
. Also,
- * {"N":"6"}
does not compare to {"NS":["6", "2", "1"]}
.
- * GE
: Greater than or equal.
- * AttributeValueList
can contain only one AttributeValue
element of type String,
- * Number, or Binary (not a set type). If an item contains an AttributeValue
element of a different
- * type than the one provided in the request, the value does not match. For example,
- * {"S":"6"}
does not equal {"N":"6"}
. Also,
- * {"N":"6"}
does not compare to {"NS":["6", "2", "1"]}
.
- * GT
: Greater than.
- * AttributeValueList
can contain only one AttributeValue
element of type String,
- * Number, or Binary (not a set type). If an item contains an AttributeValue
element of a different
- * type than the one provided in the request, the value does not match. For example,
- * {"S":"6"}
does not equal {"N":"6"}
. Also,
- * {"N":"6"}
does not compare to {"NS":["6", "2", "1"]}
.
- * NOT_NULL
: The attribute exists. NOT_NULL
is supported for all data types, including lists and maps.
This operator tests for the existence of an attribute, not its data type. If the data type of attribute "a
" is null, and you evaluate it using NOT_NULL
, the result is a Boolean true
. This result is because the attribute "a
" exists; its data type is not relevant to the NOT_NULL
comparison operator.
- * NULL
: The attribute does not exist. NULL
is supported for all data types, including lists and maps.
This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute "a
" is null, and you evaluate it using NULL
, the result is a Boolean false
. This is because the attribute "a
" exists; its data type is not relevant to the NULL
comparison operator.
- * CONTAINS
: Checks for a subsequence, or value in a set.
- * AttributeValueList
can contain only one AttributeValue
element of type String,
- * Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then
- * the operator checks for a substring match. If the target attribute of the comparison is
- * of type Binary, then the operator looks for a subsequence of the target that matches the input.
- * If the target attribute of the comparison is a set ("SS
", "NS
", or "BS
"), then the
- * operator evaluates to true if it finds an exact match with any member of the set.
CONTAINS is supported for lists: When evaluating "a CONTAINS b
", "a
" can be a list; however, "b
" cannot be a set, a map, or a list.
- * NOT_CONTAINS
: Checks for absence of a subsequence, or absence of a value in
- * a set.
- * AttributeValueList
can contain only one AttributeValue
element of type String,
- * Number, or Binary (not a set type). If the target attribute of the comparison is a String, then
- * the operator checks for the absence of a substring match. If the target attribute of the
- * comparison is Binary, then the operator checks for the absence of a subsequence of the
- * target that matches the input. If the target attribute of the comparison is a set ("SS
",
- * "NS
", or "BS
"), then the operator evaluates to true if it does not find an exact match with any member of the set.
NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b
", "a
" can be a list; however, "b
" cannot be a set, a map, or a list.
- * BEGINS_WITH
: Checks for a prefix.
- * AttributeValueList
can contain only one AttributeValue
of type String or
- * Binary (not a Number or a set type). The target attribute of the comparison must be of type String or
- * Binary (not a Number or a set type).
- * IN
: Checks for matching elements in a list.
- * AttributeValueList
can contain one or more AttributeValue
- * elements of type String, Number, or Binary. These attributes are compared against an
- * existing attribute of an item. If any elements of the input are equal to the item
- * attribute, the expression evaluates to true.
- * BETWEEN
: Greater than or equal to the first value, and less than or equal
- * to the second value.
- * AttributeValueList
must contain two AttributeValue
elements of the same
- * type, either String, Number, or Binary (not a set type). A target attribute matches if the
- * target value is greater than, or equal to, the first element and less than, or equal to,
- * the second element. If an item contains an AttributeValue
element of a different type than
- * the one provided in the request, the value does not match. For example,
- * {"S":"6"}
does not compare to {"N":"6"}
. Also,
- * {"N":"6"}
does not compare to {"NS":["6", "2", "1"]}
- *
A comparator for evaluating attributes. For example, equals, greater than, less than, + * etc.
+ *The following comparison operators are available:
+ *
+ * EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS |
+ * BEGINS_WITH | IN | BETWEEN
+ *
The following are descriptions of each comparison operator.
+ *
+ * EQ
: Equal. EQ
is supported for all data types,
+ * including lists and maps.
+ * AttributeValueList
can contain only one AttributeValue
+ * element of type String, Number, Binary, String Set, Number Set, or Binary Set.
+ * If an item contains an AttributeValue
element of a different type
+ * than the one provided in the request, the value does not match. For example,
+ * {"S":"6"}
does not equal {"N":"6"}
. Also,
+ * {"N":"6"}
does not equal {"NS":["6", "2",
+ * "1"]}
.
+ * NE
: Not equal. NE
is supported for all data types,
+ * including lists and maps.
+ * AttributeValueList
can contain only one AttributeValue
+ * of type String, Number, Binary, String Set, Number Set, or Binary Set. If an
+ * item contains an AttributeValue
of a different type than the one
+ * provided in the request, the value does not match. For example,
+ * {"S":"6"}
does not equal {"N":"6"}
. Also,
+ * {"N":"6"}
does not equal {"NS":["6", "2",
+ * "1"]}
.
+ * LE
: Less than or equal.
+ * AttributeValueList
can contain only one AttributeValue
+ * element of type String, Number, or Binary (not a set type). If an item contains
+ * an AttributeValue
element of a different type than the one provided
+ * in the request, the value does not match. For example, {"S":"6"}
+ * does not equal {"N":"6"}
. Also, {"N":"6"}
does not
+ * compare to {"NS":["6", "2", "1"]}
.
+ * LT
: Less than.
+ * AttributeValueList
can contain only one AttributeValue
+ * of type String, Number, or Binary (not a set type). If an item contains an
+ * AttributeValue
element of a different type than the one
+ * provided in the request, the value does not match. For example,
+ * {"S":"6"}
does not equal {"N":"6"}
. Also,
+ * {"N":"6"}
does not compare to {"NS":["6", "2",
+ * "1"]}
.
+ * GE
: Greater than or equal.
+ * AttributeValueList
can contain only one AttributeValue
+ * element of type String, Number, or Binary (not a set type). If an item contains
+ * an AttributeValue
element of a different type than the one provided
+ * in the request, the value does not match. For example, {"S":"6"}
+ * does not equal {"N":"6"}
. Also, {"N":"6"}
does not
+ * compare to {"NS":["6", "2", "1"]}
.
+ * GT
: Greater than.
+ * AttributeValueList
can contain only one AttributeValue
+ * element of type String, Number, or Binary (not a set type). If an item contains
+ * an AttributeValue
element of a different type than the one provided
+ * in the request, the value does not match. For example, {"S":"6"}
+ * does not equal {"N":"6"}
. Also, {"N":"6"}
does not
+ * compare to {"NS":["6", "2", "1"]}
.
+ * NOT_NULL
: The attribute exists. NOT_NULL
is supported
+ * for all data types, including lists and maps.
This operator tests for the existence of an attribute, not its data type.
+ * If the data type of attribute "a
" is null, and you evaluate it
+ * using NOT_NULL
, the result is a Boolean true
. This
+ * result is because the attribute "a
" exists; its data type is
+ * not relevant to the NOT_NULL
comparison operator.
+ * NULL
: The attribute does not exist. NULL
is supported
+ * for all data types, including lists and maps.
This operator tests for the nonexistence of an attribute, not its data
+ * type. If the data type of attribute "a
" is null, and you
+ * evaluate it using NULL
, the result is a Boolean
+ * false
. This is because the attribute "a
"
+ * exists; its data type is not relevant to the NULL
comparison
+ * operator.
+ * CONTAINS
: Checks for a subsequence, or value in a set.
+ * AttributeValueList
can contain only one AttributeValue
+ * element of type String, Number, or Binary (not a set type). If the target
+ * attribute of the comparison is of type String, then the operator checks for a
+ * substring match. If the target attribute of the comparison is of type Binary,
+ * then the operator looks for a subsequence of the target that matches the input.
+ * If the target attribute of the comparison is a set ("SS
",
+ * "NS
", or "BS
"), then the operator evaluates to
+ * true if it finds an exact match with any member of the set.
CONTAINS is supported for lists: When evaluating "a CONTAINS b
",
+ * "a
" can be a list; however, "b
" cannot be a set, a
+ * map, or a list.
+ * NOT_CONTAINS
: Checks for absence of a subsequence, or absence of a
+ * value in a set.
+ * AttributeValueList
can contain only one AttributeValue
+ * element of type String, Number, or Binary (not a set type). If the target
+ * attribute of the comparison is a String, then the operator checks for the
+ * absence of a substring match. If the target attribute of the comparison is
+ * Binary, then the operator checks for the absence of a subsequence of the target
+ * that matches the input. If the target attribute of the comparison is a set
+ * ("SS
", "NS
", or "BS
"), then the
+ * operator evaluates to true if it does not find an exact
+ * match with any member of the set.
NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS
+ * b
", "a
" can be a list; however, "b
" cannot
+ * be a set, a map, or a list.
+ * BEGINS_WITH
: Checks for a prefix.
+ * AttributeValueList
can contain only one AttributeValue
+ * of type String or Binary (not a Number or a set type). The target attribute of
+ * the comparison must be of type String or Binary (not a Number or a set
+ * type).
+ * IN
: Checks for matching elements in a list.
+ * AttributeValueList
can contain one or more
+ * AttributeValue
elements of type String, Number, or Binary.
+ * These attributes are compared against an existing attribute of an item. If any
+ * elements of the input are equal to the item attribute, the expression evaluates
+ * to true.
+ * BETWEEN
: Greater than or equal to the first value, and less than
+ * or equal to the second value.
+ * AttributeValueList
must contain two AttributeValue
+ * elements of the same type, either String, Number, or Binary (not a set type). A
+ * target attribute matches if the target value is greater than, or equal to, the
+ * first element and less than, or equal to, the second element. If an item
+ * contains an AttributeValue
element of a different type than the one
+ * provided in the request, the value does not match. For example,
+ * {"S":"6"}
does not compare to {"N":"6"}
. Also,
+ * {"N":"6"}
does not compare to {"NS":["6", "2",
+ * "1"]}
+ *
For usage examples of AttributeValueList
and ComparisonOperator
, see
- * Legacy Conditional Parameters
- * in the Amazon DynamoDB Developer Guide.
For usage examples of AttributeValueList
and
+ * ComparisonOperator
, see Legacy
+ * Conditional Parameters in the Amazon DynamoDB Developer
+ * Guide.
A map of attribute name to attribute values, representing the primary key of the item to delete. All of the table's primary key attributes must be specified, and their data types must match those of the table's key schema.
- *A map of attribute name to attribute values, representing the primary key of the item to delete. All of the table's primary key attributes must be specified, and their data types must match those of the table's key schema.
*/ Key: { [key: string]: AttributeValue } | undefined; } @@ -6798,30 +7034,24 @@ export namespace DeleteRequest { export interface ExecuteStatementInput { /** - *- * The PartiQL statement representing the operation to run. - *
+ *The PartiQL statement representing the operation to run.
*/ Statement: string | undefined; /** - *- * The parameters for the PartiQL statement, if any. - *
+ *The parameters for the PartiQL statement, if any.
*/ Parameters?: AttributeValue[]; /** - *
- * The consistency of a read operation. If set to true
, then a strongly consistent read is used; otherwise, an eventually consistent read is used.
- *
The consistency of a read operation. If set to true
, then a strongly
+ * consistent read is used; otherwise, an eventually consistent read is used.
- * Set this value to get remaining results, if NextToken
was returned in the statement response.
- *
Set this value to get remaining results, if NextToken
was returned in the
+ * statement response.
Specifies an item and related attribute values to retrieve in a
- * TransactGetItem
object.
TransactGetItem
object.
*/
export interface Get {
/**
- * A map of attribute names to AttributeValue
objects that
- * specifies the primary key of the item to retrieve.
A map of attribute names to AttributeValue
objects that specifies the
+ * primary key of the item to retrieve.
A string that identifies one or more attributes of the specified item - * to retrieve from the table. The attributes in the expression must be - * separated by commas. If no attribute names are specified, then all - * attributes of the specified item are returned. If any of the requested - * attributes are not found, they do not appear in the result.
+ *A string that identifies one or more attributes of the specified item to retrieve from + * the table. The attributes in the expression must be separated by commas. If no attribute + * names are specified, then all attributes of the specified item are returned. If any of + * the requested attributes are not found, they do not appear in the result.
*/ ProjectionExpression?: string; /** - *One or more substitution tokens for attribute names in the - * ProjectionExpression parameter.
+ *One or more substitution tokens for attribute names in the ProjectionExpression + * parameter.
*/ ExpressionAttributeNames?: { [key: string]: string }; } @@ -6896,93 +7125,120 @@ export interface GetItemInput { TableName: string | undefined; /** - *A map of attribute names to AttributeValue
objects, representing the primary key of
- * the item to retrieve.
For the primary key, you must provide all of the attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.
+ *A map of attribute names to AttributeValue
objects, representing the
+ * primary key of the item to retrieve.
For the primary key, you must provide all of the attributes. For example, with a + * simple primary key, you only need to provide a value for the partition key. For a + * composite primary key, you must provide values for both the partition key and the sort + * key.
*/ Key: { [key: string]: AttributeValue } | undefined; /** - *This is a legacy parameter. Use ProjectionExpression
instead. For more information, see
- * AttributesToGet in the Amazon DynamoDB Developer Guide.
This is a legacy parameter. Use ProjectionExpression
instead. For more
+ * information, see AttributesToGet in the Amazon DynamoDB Developer
+ * Guide.
Determines the read consistency model: If set to true
, then the operation uses strongly consistent reads; otherwise, the operation uses eventually consistent reads.
Determines the read consistency model: If set to true
, then the operation
+ * uses strongly consistent reads; otherwise, the operation uses eventually consistent
+ * reads.
Determines the level of detail about provisioned throughput consumption that is returned in the response:
- *Determines the level of detail about provisioned throughput consumption that is + * returned in the response:
+ *
- * INDEXES
- The response includes the aggregate ConsumedCapacity
for the operation, together with ConsumedCapacity
for each table and secondary index that was accessed.
Note that some operations, such as GetItem
and BatchGetItem
, do not access any indexes at all. In these cases, specifying INDEXES
will only return ConsumedCapacity
information for table(s).
+ * INDEXES
- The response includes the aggregate
+ * ConsumedCapacity
for the operation, together with
+ * ConsumedCapacity
for each table and secondary index that was
+ * accessed.
Note that some operations, such as GetItem
and
+ * BatchGetItem
, do not access any indexes at all. In these cases,
+ * specifying INDEXES
will only return ConsumedCapacity
+ * information for table(s).
- * TOTAL
- The response includes only the aggregate ConsumedCapacity
for the operation.
+ * TOTAL
- The response includes only the aggregate
+ * ConsumedCapacity
for the operation.
- * NONE
- No ConsumedCapacity
details are included in the response.
+ * NONE
- No ConsumedCapacity
details are included in the
+ * response.
A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.
- *If no attribute names are specified, then all attributes are returned. If any of the + *
A string that identifies one or more attributes to retrieve from the table. These + * attributes can include scalars, sets, or elements of a JSON document. The attributes in + * the expression must be separated by commas.
+ *If no attribute names are specified, then all attributes are returned. If any of the * requested attributes are not found, they do not appear in the result.
- *For more information, see Specifying Item Attributes in the Amazon DynamoDB Developer + *
For more information, see Specifying Item Attributes in the Amazon DynamoDB Developer * Guide.
*/ ProjectionExpression?: string; /** - *One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames
:
One or more substitution tokens for attribute names in an expression. The following
+ * are some use cases for using ExpressionAttributeNames
:
To access an attribute whose name conflicts with a DynamoDB reserved word.
+ *To access an attribute whose name conflicts with a DynamoDB reserved + * word.
*To create a placeholder for repeating occurrences of an attribute name in an expression.
+ *To create a placeholder for repeating occurrences of an attribute name in an + * expression.
*To prevent special characters in an attribute name from being misinterpreted in an expression.
+ *To prevent special characters in an attribute name from being misinterpreted + * in an expression.
*Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
- *Use the # character in an expression to dereference + * an attribute name. For example, consider the following attribute name:
+ *
- * Percentile
- *
+ * Percentile
+ *
The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames
:
The name of this attribute conflicts with a reserved word, so it cannot be used
+ * directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer
+ * Guide). To work around this, you could specify the following for
+ * ExpressionAttributeNames
:
- * {"#P":"Percentile"}
- *
+ * {"#P":"Percentile"}
+ *
You could then use this substitution in an expression, as in this example:
- *You could then use this substitution in an expression, as in this example:
+ *
- * #P = :val
- *
+ * #P = :val
+ *
Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.
- *For more information on expression attribute names, see Specifying Item Attributes in the Amazon DynamoDB Developer
+ * Tokens that begin with the : character are
+ * expression attribute values, which are placeholders for the
+ * actual value at runtime.
For more information on expression attribute names, see Specifying Item Attributes in the Amazon DynamoDB Developer * Guide.
*/ ExpressionAttributeNames?: { [key: string]: string }; @@ -7011,8 +7267,8 @@ export namespace GetItemInput { */ export interface GetItemOutput { /** - *A map of attribute names to AttributeValue
objects, as specified
- * by ProjectionExpression
.
A map of attribute names to AttributeValue
objects, as specified by
+ * ProjectionExpression
.
Information about item collections, if any, that were affected by the operation.
- * ItemCollectionMetrics
is only returned if the request asked for it. If the
- * table does not have any local secondary indexes, this information is not returned in the response.
ItemCollectionMetrics
is only returned if the request asked for it. If
+ * the table does not have any local secondary indexes, this information is not returned in
+ * the response.
*/
export interface ItemCollectionMetrics {
/**
- * The partition key value of the item collection. This value is the same as the partition key value of the item.
+ *The partition key value of the item collection. This value is the same as the + * partition key value of the item.
*/ ItemCollectionKey?: { [key: string]: AttributeValue }; /** - *An estimate of item collection size, in gigabytes. This value is a two-element array containing a lower bound and an upper bound for the estimate. The estimate includes the size of all the items in the table, plus the size of all attributes projected into all of the local secondary indexes on that table. Use this estimate to measure whether a local secondary index is approaching its size limit.
- *The estimate is subject to change over time; therefore, do not rely on the precision or accuracy of the estimate.
+ *An estimate of item collection size, in gigabytes. This value is a two-element array + * containing a lower bound and an upper bound for the estimate. The estimate includes the + * size of all the items in the table, plus the size of all attributes projected into all + * of the local secondary indexes on that table. Use this estimate to measure whether a + * local secondary index is approaching its size limit.
+ *The estimate is subject to change over time; therefore, do not rely on the precision + * or accuracy of the estimate.
*/ SizeEstimateRangeGB?: number[]; } @@ -7110,22 +7373,16 @@ export namespace ItemResponse { } /** - *- * Represents a PartiQL statment that uses parameters. - *
+ *Represents a PartiQL statment that uses parameters.
*/ export interface ParameterizedStatement { /** - *- * A PartiQL statment that uses parameters. - *
+ *A PartiQL statment that uses parameters.
*/ Statement: string | undefined; /** - *- * The parameter values. - *
+ *The parameter values.
*/ Parameters?: AttributeValue[]; } @@ -7145,11 +7402,11 @@ export namespace ParameterizedStatement { */ export interface PutRequest { /** - *A map of attribute name to attribute values, representing the primary key of an item to
-   * be processed by PutItem. All of the table's primary key attributes must be
-   * specified, and their data types must match those of the table's key schema. If any
-   * attributes are present in the item that are part of an index key schema for the table,
-   * their types must match the index key schema.
+   * A map of attribute name to attribute values, representing the primary key of an item
+   * to be processed by PutItem. All of the table's primary key attributes must
+   * be specified, and their data types must match those of the table's key schema. If any
+   * attributes are present in the item that are part of an index key schema for the table,
+   * their types must match the index key schema.
-   * If a read operation was used, this property will contain the result of the reade operation; a map of attribute names and their values. For the write operations this value will be empty.
+   * If a read operation was used, this property will contain the result of the read
+   * operation; a map of attribute names and their values. For write operations, this
+   * value will be empty.
   */
  Items?: { [key: string]: AttributeValue }[];

  /**
-   * If the response of a read request exceeds the response payload limit DynamoDB will set this value in the response. If set, you can use that this value in the subsequent request to get the remaining results.
+   * If the response of a read request exceeds the response payload limit, DynamoDB will set
+   * this value in the response. If set, you can use this value in the subsequent
+   * request to get the remaining results.
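For the Items and NextToken fields documented above, a short pagination sketch (hypothetical table name; assuming ExecuteStatementCommand from this same package):

```ts
import { DynamoDBClient, ExecuteStatementCommand } from "@aws-sdk/client-dynamodb";

const client = new DynamoDBClient({});

// Read items with a parameterized PartiQL statement, following
// NextToken until the full result set has been returned.
async function readAll() {
  let NextToken: string | undefined;
  do {
    const page = await client.send(
      new ExecuteStatementCommand({
        Statement: "SELECT * FROM Music WHERE Artist = ?", // hypothetical table
        Parameters: [{ S: "Acme Band" }],
        NextToken,
      })
    );
    for (const item of page.Items ?? []) console.log(item);
    NextToken = page.NextToken;
  } while (NextToken);
}
```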
   */
  NextToken?: string;
 }

@@ -7209,79 +7466,97 @@ export namespace ExecuteStatementOutput {
 }

 /**
-   * Represents a set of primary keys and, for each key, the attributes to retrieve from the table.
-   * For each primary key, you must provide all of the key attributes. For example, with a
-   * simple primary key, you only need to provide the partition key. For a composite
-   * primary key, you must provide both the partition key and the sort key.
+   * Represents a set of primary keys and, for each key, the attributes to retrieve from
+   * the table.
+   * For each primary key, you must provide all of the key attributes.
+   * For example, with a simple primary key, you only need to provide the partition key. For
+   * a composite primary key, you must provide both the partition key
+   * and the sort key.
   */
 export interface KeysAndAttributes {
   /**
-   * The primary key attribute values that define the items and the attributes associated with the items.
+   * The primary key attribute values that define the items and the attributes associated
+   * with the items.
   */
  Keys: { [key: string]: AttributeValue }[] | undefined;

  /**
-   * This is a legacy parameter. Use ProjectionExpression instead. For more information, see
-   * Legacy Conditional Parameters in the Amazon DynamoDB Developer Guide.
+   * This is a legacy parameter. Use ProjectionExpression instead. For more
+   * information, see Legacy
+   * Conditional Parameters in the Amazon DynamoDB Developer
+   * Guide.
-   * The consistency of a read operation. If set to true, then a strongly consistent
-   * read is used; otherwise, an eventually consistent read is used.
+   * The consistency of a read operation. If set to true, then a strongly
+   * consistent read is used; otherwise, an eventually consistent read is used.
-   * A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars,
-   * sets, or elements of a JSON document. The attributes in the ProjectionExpression must be separated by
-   * commas.
-   * If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.
-   * For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.
+   * A string that identifies one or more attributes to retrieve from the table. These
+   * attributes can include scalars, sets, or elements of a JSON document. The attributes in
+   * the ProjectionExpression must be separated by commas.
+   * If no attribute names are specified, then all attributes will be returned. If any of
+   * the requested attributes are not found, they will not appear in the result.
+   * For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer
+   * Guide.
   */
  ProjectionExpression?: string;

  /**
-   * One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:
+   * One or more substitution tokens for attribute names in an expression. The following
+   * are some use cases for using ExpressionAttributeNames:
-   * To access an attribute whose name conflicts with a DynamoDB reserved word.
+   * To access an attribute whose name conflicts with a DynamoDB reserved
+   * word.
-   * To create a placeholder for repeating occurrences of an attribute name in an expression.
+   * To create a placeholder for repeating occurrences of an attribute name in an
+   * expression.
-   * To prevent special characters in an attribute name from being misinterpreted in an expression.
+   * To prevent special characters in an attribute name from being misinterpreted
+   * in an expression.
-   * Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
+   * Use the # character in an expression to dereference
+   * an attribute name. For example, consider the following attribute name:
-   * Percentile
+   * Percentile
-   * The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames:
+   * The name of this attribute conflicts with a reserved word, so it cannot be used
+   * directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer
+   * Guide). To work around this, you could specify the following for
+   * ExpressionAttributeNames:
-   * {"#P":"Percentile"}
+   * {"#P":"Percentile"}
-   * You could then use this substitution in an expression, as in this example:
+   * You could then use this substitution in an expression, as in this example:
-   * #P = :val
+   * #P = :val
-   * Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.
+   * Tokens that begin with the : character are
+   * expression attribute values, which are placeholders for the
+   * actual value at runtime.
-   * For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer Guide.
+   * For more information on expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB Developer
+   * Guide.
+ *Contains the primary key that identifies the item to get, together with the name of + * the table that contains the item, and optionally the specific attributes of the item to + * retrieve.
*/ Get: Get | undefined; } @@ -7330,9 +7605,7 @@ export namespace TransactGetItem { export interface BatchExecuteStatementInput { /** - *- * The list of PartiQL statements representing the batch to run. - *
+ *The list of PartiQL statements representing the batch to run.
*/ Statements: BatchStatementRequest[] | undefined; } @@ -7349,9 +7622,7 @@ export namespace BatchExecuteStatementInput { export interface BatchExecuteStatementOutput { /** - *- * The response to each PartiQL statement in the batch. - *
+ *The response to each PartiQL statement in the batch.
*/ Responses?: BatchStatementResponse[]; } @@ -7368,16 +7639,13 @@ export namespace BatchExecuteStatementOutput { export interface ExecuteTransactionInput { /** - *- * The list of PartiQL statements representing the transaction to run. - *
+ *The list of PartiQL statements representing the transaction to run.
*/ TransactStatements: ParameterizedStatement[] | undefined; /** - *
- * Set this value to get remaining results, if NextToken
was returned in the statement response.
- *
Set this value to get remaining results, if NextToken
was returned in the
+ * statement response.
- * The response to a PartiQL transaction. - *
+ *The response to a PartiQL transaction.
*/ Responses?: ItemResponse[]; } @@ -7415,23 +7681,23 @@ export namespace ExecuteTransactionOutput { export interface TransactGetItemsOutput { /** - *If the ReturnConsumedCapacity value was TOTAL
,
- * this is an array of ConsumedCapacity
objects, one for each table
- * addressed by TransactGetItem
objects in the TransactItems
- * parameter. These ConsumedCapacity
objects report the read-capacity
- * units consumed by the TransactGetItems
call in that table.
If the ReturnConsumedCapacity value was TOTAL
, this
+ * is an array of ConsumedCapacity
objects, one for each table addressed by
+ * TransactGetItem
objects in the TransactItems
+ * parameter. These ConsumedCapacity
objects report the read-capacity units
+ * consumed by the TransactGetItems
call in that table.
An ordered array of up to 25 ItemResponse
objects, each of which corresponds
- * to the TransactGetItem
object in the same position in the
- * TransactItems array. Each ItemResponse
object
- * contains a Map of the name-value pairs that are the projected attributes of
- * the requested item.
If a requested item could not be retrieved, the corresponding ItemResponse
- * object is Null, or if the requested item has no projected attributes, the corresponding
- * ItemResponse
object is an empty Map.
An ordered array of up to 25 ItemResponse
objects, each of which
+ * corresponds to the TransactGetItem
object in the same position in the
+ * TransactItems array. Each ItemResponse
object
+ * contains a Map of the name-value pairs that are the projected attributes of the
+ * requested item.
If a requested item could not be retrieved, the corresponding
+ * ItemResponse
object is Null, or if the requested item has no projected
+ * attributes, the corresponding ItemResponse
object is an empty Map.
The entire transaction request was canceled.
- *DynamoDB cancels a TransactWriteItems
request under the following circumstances:
DynamoDB cancels a TransactWriteItems
request under the following
+ * circumstances:
A condition in one of the condition expressions is not met.
+ *A condition in one of the condition expressions is not met.
*A table in the TransactWriteItems
request is in a different account or region.
A table in the TransactWriteItems
request is in a different
+ * account or region.
More than one action in the TransactWriteItems
operation targets the same item.
More than one action in the TransactWriteItems
operation
+ * targets the same item.
There is insufficient provisioned capacity for the transaction to be completed.
+ *There is insufficient provisioned capacity for the transaction to be + * completed.
*An item size becomes too large (larger than 400 KB), or a local secondary index (LSI) - * becomes too large, or a similar validation error occurs because of changes made by the transaction.
+ *An item size becomes too large (larger than 400 KB), or a local secondary + * index (LSI) becomes too large, or a similar validation error occurs because of + * changes made by the transaction.
*There is a user error, such as an invalid data format.
+ *There is a user error, such as an invalid data format.
*DynamoDB cancels a TransactGetItems
request under the following circumstances:
DynamoDB cancels a TransactGetItems
request under the
+ * following circumstances:
There is an ongoing TransactGetItems
operation that conflicts with a concurrent
- * PutItem
, UpdateItem
, DeleteItem
or TransactWriteItems
request.
- * In this case the TransactGetItems
operation fails with a TransactionCanceledException
.
There is an ongoing TransactGetItems
operation that conflicts
+ * with a concurrent PutItem
, UpdateItem
,
+ * DeleteItem
or TransactWriteItems
request. In this
+ * case the TransactGetItems
operation fails with a
+ * TransactionCanceledException
.
A table in the TransactGetItems
request is in a different account or region.
A table in the TransactGetItems
request is in a different
+ * account or region.
There is insufficient provisioned capacity for the transaction to be completed.
+ *There is insufficient provisioned capacity for the transaction to be + * completed.
*There is a user error, such as an invalid data format.
+ *There is a user error, such as an invalid data format.
*If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
property. This property is not set for other languages.
- * Transaction cancellation reasons are ordered in the order of requested items, if an item has no error it will have NONE
code and Null
message.
Cancellation reason codes and possible error messages:
- *If using Java, DynamoDB lists the cancellation reasons on the
+ * CancellationReasons
property. This property is not set for other
+ * languages. Transaction cancellation reasons are ordered in the order of requested
+ * items, if an item has no error it will have NONE
code and
+ * Null
message.
Cancellation reason codes and possible error messages:
+ *No Errors:
- *No Errors:
+ *Code: NONE
- *
Code: NONE
+ *
Message: null
- *
Message: null
+ *
Conditional Check Failed:
- *Conditional Check Failed:
+ *Code: ConditionalCheckFailed
- *
Code: ConditionalCheckFailed
+ *
Message: The conditional request failed.
- *Message: The conditional request failed.
+ * *Item Collection Size Limit Exceeded:
- *Item Collection Size Limit Exceeded:
+ *Code: ItemCollectionSizeLimitExceeded
- *
Code: ItemCollectionSizeLimitExceeded
+ *
Message: Collection size exceeded.
- *Message: Collection size exceeded.
+ * *Transaction Conflict:
- *Transaction Conflict:
+ *Code: TransactionConflict
- *
Code: TransactionConflict
+ *
Message: Transaction is ongoing for the item.
- *Message: Transaction is ongoing for the item.
+ * *Provisioned Throughput Exceeded:
- *Provisioned Throughput Exceeded:
+ *Code: ProvisionedThroughputExceeded
- *
Code: ProvisionedThroughputExceeded
+ *
Messages:
- *Messages:
+ *The level of configured provisioned throughput for the table was exceeded. Consider increasing your provisioning level with the - * UpdateTable API.
- *This Message is received when provisioned throughput is exceeded is on a provisioned DynamoDB table.
- *The level of configured provisioned throughput for the + * table was exceeded. Consider increasing your provisioning level + * with the UpdateTable API.
+ *This Message is received when provisioned throughput is + * exceeded is on a provisioned DynamoDB + * table.
+ *The level of configured provisioned throughput for one or more global secondary indexes of the table was exceeded. - * Consider increasing your provisioning level for the under-provisioned global secondary indexes with the UpdateTable API.
- *This message is returned when provisioned throughput is exceeded is on a provisioned GSI.
- *The level of configured provisioned throughput for one or + * more global secondary indexes of the table was exceeded. + * Consider increasing your provisioning level for the + * under-provisioned global secondary indexes with the UpdateTable + * API.
+ *This message is returned when provisioned throughput is + * exceeded is on a provisioned GSI.
+ *Throttling Error:
- *Throttling Error:
+ *Code: ThrottlingError
- *
Code: ThrottlingError
+ *
Messages:
- *Messages:
+ *Throughput exceeds the current capacity of your table or index. - * DynamoDB is automatically scaling your table or index so please try - * again shortly. If exceptions persist, check if you have a hot key: - * https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
- *This message is returned when writes get throttled on an On-Demand table as DynamoDB is automatically scaling the table.
- *Throughput exceeds the current capacity of your table or + * index. DynamoDB is automatically scaling your table or + * index so please try again shortly. If exceptions persist, check + * if you have a hot key: + * https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+ *This message is returned when writes get throttled on an + * On-Demand table as DynamoDB is automatically + * scaling the table.
+ *Throughput exceeds the current capacity for one or more global secondary indexes. DynamoDB is automatically scaling your index so - * please try again shortly.
- *This message is returned when when writes get throttled on an On-Demand GSI as DynamoDB is automatically scaling the GSI.
- *Throughput exceeds the current capacity for one or more + * global secondary indexes. DynamoDB is automatically + * scaling your index so please try again shortly.
+ *This message is returned when when writes get throttled on + * an On-Demand GSI as DynamoDB is automatically + * scaling the GSI.
+ *Validation Error:
- *Validation Error:
+ *Code: ValidationError
- *
Code: ValidationError
+ *
Messages:
- *Messages:
+ *One or more parameter values were invalid.
- *One or more parameter values were invalid.
+ *The update expression attempted to update the secondary index key beyond allowed size limits.
- *The update expression attempted to update the secondary + * index key beyond allowed size limits.
+ * *The update expression attempted to update the secondary index key to unsupported type.
- *The update expression attempted to update the secondary + * index key to unsupported type.
+ * *An operand in the update expression has an incorrect data type.
- *An operand in the update expression has an incorrect data + * type.
+ * *Item size to update has exceeded the maximum allowed size.
- *Item size to update has exceeded the maximum allowed + * size.
+ * *Number overflow. Attempting to store a number with magnitude larger than supported range.
- *Number overflow. Attempting to store a number with + * magnitude larger than supported range.
+ * *Type mismatch for attribute to update.
- *Type mismatch for attribute to update.
+ * *Nesting Levels have exceeded supported limits.
- *Nesting Levels have exceeded supported limits.
+ * *The document path provided in the update expression is invalid for update.
- *The document path provided in the update expression is + * invalid for update.
+ * *The provided expression refers to an attribute that does not exist in the item.
- *The provided expression refers to an attribute that does + * not exist in the item.
+ * *A map of one or more table names and, for each table, a map that describes one or more items to retrieve from that table. Each table name can be used only once per BatchGetItem
request.
Each element in the map of items to retrieve consists of the following:
- *A map of one or more table names and, for each table, a map that describes one or more
+ * items to retrieve from that table. Each table name can be used only once per
+ * BatchGetItem
request.
Each element in the map of items to retrieve consists of the following:
+ *
- * ConsistentRead
- If true
, a strongly consistent read is used; if
- * false
(the default), an eventually consistent read is used.
+ * ConsistentRead
- If true
, a strongly consistent read
+ * is used; if false
(the default), an eventually consistent read is
+ * used.
- * ExpressionAttributeNames
- One or more substitution tokens for attribute names in the ProjectionExpression
parameter. The following are some use cases for using ExpressionAttributeNames
:
+ * ExpressionAttributeNames
- One or more substitution tokens for
+ * attribute names in the ProjectionExpression
parameter. The
+ * following are some use cases for using
+ * ExpressionAttributeNames
:
To access an attribute whose name conflicts with a DynamoDB reserved word.
- *To access an attribute whose name conflicts with a DynamoDB reserved + * word.
+ *To create a placeholder for repeating occurrences of an attribute name in an expression.
- *To create a placeholder for repeating occurrences of an attribute name + * in an expression.
+ * *To prevent special characters in an attribute name from being misinterpreted in an expression.
- *To prevent special characters in an attribute name from being + * misinterpreted in an expression.
+ * *Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
- *Use the # character in an expression to + * dereference an attribute name. For example, consider the following attribute + * name:
+ *
- * Percentile
- *
+ * Percentile
+ *
The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for ExpressionAttributeNames
:
The name of this attribute conflicts with a reserved word, so it cannot be
+ * used directly in an expression. (For the complete list of reserved words, see
+ * Reserved
+ * Words in the Amazon DynamoDB Developer Guide).
+ * To work around this, you could specify the following for
+ * ExpressionAttributeNames
:
- * {"#P":"Percentile"}
- *
+ * {"#P":"Percentile"}
+ *
You could then use this substitution in an expression, as in this example:
- *You could then use this substitution in an expression, as in this + * example:
+ *
- * #P = :val
- *
+ * #P = :val
+ *
Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.
- *For more information about expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB
+ * Tokens that begin with the : character
+ * are expression attribute values, which are placeholders
+ * for the actual value at runtime.
For more information about expression attribute names, see Accessing Item Attributes in the Amazon DynamoDB * Developer Guide.
* *
- * Keys
- An array of primary key attribute values that define specific items in the
- * table. For each primary key, you must provide all of the key attributes. For
- * example, with a simple primary key, you only need to provide the partition key value. For a
- * composite key, you must provide both the partition key value and the sort key value.
+ * Keys
- An array of primary key attribute values that define
+ * specific items in the table. For each primary key, you must provide
+ * all of the key attributes. For example, with a simple
+ * primary key, you only need to provide the partition key value. For a composite
+ * key, you must provide both the partition key value and the
+ * sort key value.
@@ -7746,15 +8066,14 @@ export interface BatchGetItemInput { *
If no attribute names are specified, then all attributes are returned. If any * of the requested attributes are not found, they do not appear in the * result.
- *For more information, see - * Accessing Item Attributes in the Amazon DynamoDB Developer Guide.
+ *For more information, see Accessing Item Attributes in the Amazon DynamoDB + * Developer Guide.
*
- * AttributesToGet
- This is a legacy parameter. Use ProjectionExpression
instead. For more information, see
- * AttributesToGet in the Amazon DynamoDB Developer Guide.
- *
- *
+ * AttributesToGet
- This is a legacy parameter. Use
+ * ProjectionExpression
instead. For more information, see AttributesToGet in the Amazon DynamoDB Developer
+ * Guide.
Determines the level of detail about provisioned throughput consumption that is returned in the response:
- *Determines the level of detail about provisioned throughput consumption that is + * returned in the response:
+ *
- * INDEXES
- The response includes the aggregate ConsumedCapacity
for the operation, together with ConsumedCapacity
for each table and secondary index that was accessed.
Note that some operations, such as GetItem
and BatchGetItem
, do not access any indexes at all. In these cases, specifying INDEXES
will only return ConsumedCapacity
information for table(s).
+ * INDEXES
- The response includes the aggregate
+ * ConsumedCapacity
for the operation, together with
+ * ConsumedCapacity
for each table and secondary index that was
+ * accessed.
Note that some operations, such as GetItem
and
+ * BatchGetItem
, do not access any indexes at all. In these cases,
+ * specifying INDEXES
will only return ConsumedCapacity
+ * information for table(s).
- * TOTAL
- The response includes only the aggregate ConsumedCapacity
for the operation.
+ * TOTAL
- The response includes only the aggregate
+ * ConsumedCapacity
for the operation.
- * NONE
- No ConsumedCapacity
details are included in the response.
+ * NONE
- No ConsumedCapacity
details are included in the
+ * response.
Represents a condition to be compared with an attribute value. This condition can be
- * used with DeleteItem
, PutItem
, or UpdateItem
- * operations; if the comparison evaluates to true, the operation succeeds; if not, the
- * operation fails. You can use ExpectedAttributeValue
in one of two different
- * ways:
DeleteItem
, PutItem
, or UpdateItem
+ * operations; if the comparison evaluates to true, the operation succeeds; if not, the
+ * operation fails. You can use ExpectedAttributeValue
in one of two different
+ * ways:
+ * Use AttributeValueList
to specify one or more values to compare against an
- * attribute. Use ComparisonOperator
to specify how you want to perform the
- * comparison. If the comparison evaluates to true, then the conditional operation
- * succeeds.
Use AttributeValueList
to specify one or more values to compare
+ * against an attribute. Use ComparisonOperator
to specify how you
+ * want to perform the comparison. If the comparison evaluates to true, then the
+ * conditional operation succeeds.
Use Value
to specify a value that DynamoDB will compare against an attribute. If the
- * values match, then ExpectedAttributeValue
evaluates to true and the conditional
- * operation succeeds. Optionally, you can also set Exists
to false, indicating that
- * you do not expect to find the attribute value in the table. In this case, the
- * conditional operation succeeds only if the comparison evaluates to false.
Use Value
to specify a value that DynamoDB will compare against
+ * an attribute. If the values match, then ExpectedAttributeValue
+ * evaluates to true and the conditional operation succeeds. Optionally, you can
+ * also set Exists
to false, indicating that you do
+ * not expect to find the attribute value in the table. In this
+ * case, the conditional operation succeeds only if the comparison evaluates to
+ * false.
- * Value
and Exists
are incompatible with AttributeValueList
and
- * ComparisonOperator
. Note that if you use both sets of parameters at once, DynamoDB will
- * return a ValidationException
exception.
+ * Value
and Exists
are incompatible with
+ * AttributeValueList
and ComparisonOperator
. Note that if
+ * you use both sets of parameters at once, DynamoDB will return a
+ * ValidationException
exception.
Represents the data for the expected attribute.
- *Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
- *For more information, see Data Types in the - * Amazon DynamoDB Developer Guide.
+ *Each attribute value is described as a name-value pair. The name is the data type, and + * the value is the data itself.
+ *For more information, see Data Types in the Amazon DynamoDB Developer + * Guide.
*/ Value?: AttributeValue; /** - *Causes DynamoDB to evaluate the value before attempting a conditional operation:
- *Causes DynamoDB to evaluate the value before attempting a conditional + * operation:
+ *If Exists
is true
, DynamoDB will check to see if that attribute value
- * already exists in the table. If it is found, then the operation succeeds. If it is not
- * found, the operation fails with a ConditionCheckFailedException
.
If Exists
is true
, DynamoDB will check to
+ * see if that attribute value already exists in the table. If it is found, then
+ * the operation succeeds. If it is not found, the operation fails with a
+ * ConditionCheckFailedException
.
If Exists
is false
, DynamoDB assumes that the attribute value does
- * not exist in the table. If in fact the value does not exist, then the assumption
- * is valid and the operation succeeds. If the value is found, despite the assumption that it
- * does not exist, the operation fails with a ConditionCheckFailedException
.
If Exists
is false
, DynamoDB assumes that
+ * the attribute value does not exist in the table. If in fact the value does not
+ * exist, then the assumption is valid and the operation succeeds. If the value is
+ * found, despite the assumption that it does not exist, the operation fails with a
+ * ConditionCheckFailedException
.
The default setting for Exists
is true
. If you supply a Value
all
- * by itself, DynamoDB assumes the attribute exists: You don't have to set Exists
to
- * true
, because it is implied.
DynamoDB returns a ValidationException
if:
The default setting for Exists
is true
. If you supply a
+ * Value
all by itself, DynamoDB assumes the attribute exists:
+ * You don't have to set Exists
to true
, because it is
+ * implied.
DynamoDB returns a ValidationException
if:
- * Exists
is true
but there is no Value
to check. (You expect a
- * value to exist, but don't specify what that value is.)
+ * Exists
is true
but there is no Value
to
+ * check. (You expect a value to exist, but don't specify what that value
+ * is.)
- * Exists
is false
but you also provide a Value
. (You cannot
- * expect an attribute to have a value, while also expecting it not to exist.)
+ * Exists
is false
but you also provide a
+ * Value
. (You cannot expect an attribute to have a value, while
+ * also expecting it not to exist.)
A comparator for evaluating attributes in the AttributeValueList
. For example, equals,
- * greater than, less than, etc.
The following comparison operators are available:
- *
- * EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN
- *
The following are descriptions of each comparison operator.
- *
- * EQ
: Equal. EQ
is supported for all data types, including lists and maps.
- * AttributeValueList
can contain only one AttributeValue
element of type String,
- * Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue
element of a different
- * type than the one provided in the request, the value does not match. For example,
- * {"S":"6"}
does not equal {"N":"6"}
. Also,
- * {"N":"6"}
does not equal {"NS":["6", "2", "1"]}
.
- * NE
: Not equal. NE
is supported for all data types, including lists and maps.
- * AttributeValueList
can contain only one AttributeValue
of type String,
- * Number, Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue
of a different
- * type than the one provided in the request, the value does not match. For example,
- * {"S":"6"}
does not equal {"N":"6"}
. Also,
- * {"N":"6"}
does not equal {"NS":["6", "2", "1"]}
.
- * LE
: Less than or equal.
- * AttributeValueList
can contain only one AttributeValue
element of type String,
- * Number, or Binary (not a set type). If an item contains an AttributeValue
element of a different
- * type than the one provided in the request, the value does not match. For example,
- * {"S":"6"}
does not equal {"N":"6"}
. Also,
- * {"N":"6"}
does not compare to {"NS":["6", "2", "1"]}
.
- * LT
: Less than.
- * AttributeValueList
can contain only one AttributeValue
of type String,
- * Number, or Binary (not a set type). If an item contains an AttributeValue
element of a different
- * type than the one provided in the request, the value does not match. For example,
- * {"S":"6"}
does not equal {"N":"6"}
. Also,
- * {"N":"6"}
does not compare to {"NS":["6", "2", "1"]}
.
- * GE
: Greater than or equal.
- * AttributeValueList
can contain only one AttributeValue
element of type String,
- * Number, or Binary (not a set type). If an item contains an AttributeValue
element of a different
- * type than the one provided in the request, the value does not match. For example,
- * {"S":"6"}
does not equal {"N":"6"}
. Also,
- * {"N":"6"}
does not compare to {"NS":["6", "2", "1"]}
.
- * GT
: Greater than.
- * AttributeValueList
can contain only one AttributeValue
element of type String,
- * Number, or Binary (not a set type). If an item contains an AttributeValue
element of a different
- * type than the one provided in the request, the value does not match. For example,
- * {"S":"6"}
does not equal {"N":"6"}
. Also,
- * {"N":"6"}
does not compare to {"NS":["6", "2", "1"]}
.
- * NOT_NULL
: The attribute exists. NOT_NULL
is supported for all data types, including lists and maps.
This operator tests for the existence of an attribute, not its data type. If the data type of attribute "a
" is null, and you evaluate it using NOT_NULL
, the result is a Boolean true
. This result is because the attribute "a
" exists; its data type is not relevant to the NOT_NULL
comparison operator.
- * NULL
: The attribute does not exist. NULL
is supported for all data types, including lists and maps.
This operator tests for the nonexistence of an attribute, not its data type. If the data type of attribute "a
" is null, and you evaluate it using NULL
, the result is a Boolean false
. This is because the attribute "a
" exists; its data type is not relevant to the NULL
comparison operator.
- * CONTAINS
: Checks for a subsequence, or value in a set.
- * AttributeValueList
can contain only one AttributeValue
element of type String,
- * Number, or Binary (not a set type). If the target attribute of the comparison is of type String, then
- * the operator checks for a substring match. If the target attribute of the comparison is
- * of type Binary, then the operator looks for a subsequence of the target that matches the input.
- * If the target attribute of the comparison is a set ("SS
", "NS
", or "BS
"), then the
- * operator evaluates to true if it finds an exact match with any member of the set.
CONTAINS is supported for lists: When evaluating "a CONTAINS b
", "a
" can be a list; however, "b
" cannot be a set, a map, or a list.
- * NOT_CONTAINS
: Checks for absence of a subsequence, or absence of a value in
- * a set.
- * AttributeValueList
can contain only one AttributeValue
element of type String,
- * Number, or Binary (not a set type). If the target attribute of the comparison is a String, then
- * the operator checks for the absence of a substring match. If the target attribute of the
- * comparison is Binary, then the operator checks for the absence of a subsequence of the
- * target that matches the input. If the target attribute of the comparison is a set ("SS
",
- * "NS
", or "BS
"), then the operator evaluates to true if it does not find an exact match with any member of the set.
NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b
", "a
" can be a list; however, "b
" cannot be a set, a map, or a list.
- * BEGINS_WITH
: Checks for a prefix.
- * AttributeValueList
can contain only one AttributeValue
of type String or
- * Binary (not a Number or a set type). The target attribute of the comparison must be of type String or
- * Binary (not a Number or a set type).
- * IN
: Checks for matching elements in a list.
- * AttributeValueList
can contain one or more AttributeValue
- * elements of type String, Number, or Binary. These attributes are compared against an
- * existing attribute of an item. If any elements of the input are equal to the item
- * attribute, the expression evaluates to true.
- * BETWEEN
: Greater than or equal to the first value, and less than or equal
- * to the second value.
- * AttributeValueList
must contain two AttributeValue
elements of the same
- * type, either String, Number, or Binary (not a set type). A target attribute matches if the
- * target value is greater than, or equal to, the first element and less than, or equal to,
- * the second element. If an item contains an AttributeValue
element of a different type than
- * the one provided in the request, the value does not match. For example,
- * {"S":"6"}
does not compare to {"N":"6"}
. Also,
- * {"N":"6"}
does not compare to {"NS":["6", "2", "1"]}
- *
A comparator for evaluating attributes in the AttributeValueList
. For
+ * example, equals, greater than, less than, etc.
The following comparison operators are available:
+ *
+ * EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS |
+ * BEGINS_WITH | IN | BETWEEN
+ *
The following are descriptions of each comparison operator.
+ *
+ * EQ
: Equal. EQ
is supported for all data types,
+ * including lists and maps.
+ * AttributeValueList
can contain only one AttributeValue
+ * element of type String, Number, Binary, String Set, Number Set, or Binary Set.
+ * If an item contains an AttributeValue
element of a different type
+ * than the one provided in the request, the value does not match. For example,
+ * {"S":"6"}
does not equal {"N":"6"}
. Also,
+ * {"N":"6"}
does not equal {"NS":["6", "2",
+ * "1"]}
.
+ * NE
: Not equal. NE
is supported for all data types,
+ * including lists and maps.
+ * AttributeValueList
can contain only one AttributeValue
+ * of type String, Number, Binary, String Set, Number Set, or Binary Set. If an
+ * item contains an AttributeValue
of a different type than the one
+ * provided in the request, the value does not match. For example,
+ * {"S":"6"}
does not equal {"N":"6"}
. Also,
+ * {"N":"6"}
does not equal {"NS":["6", "2",
+ * "1"]}
.
+ * LE
: Less than or equal.
+ * AttributeValueList
can contain only one AttributeValue
+ * element of type String, Number, or Binary (not a set type). If an item contains
+ * an AttributeValue
element of a different type than the one provided
+ * in the request, the value does not match. For example, {"S":"6"}
+ * does not equal {"N":"6"}
. Also, {"N":"6"}
does not
+ * compare to {"NS":["6", "2", "1"]}
.
+ * LT
: Less than.
+ * AttributeValueList
can contain only one AttributeValue
+ * of type String, Number, or Binary (not a set type). If an item contains an
+ * AttributeValue
element of a different type than the one
+ * provided in the request, the value does not match. For example,
+ * {"S":"6"}
does not equal {"N":"6"}
. Also,
+ * {"N":"6"}
does not compare to {"NS":["6", "2",
+ * "1"]}
.
+ * GE
: Greater than or equal.
+ * AttributeValueList
can contain only one AttributeValue
+ * element of type String, Number, or Binary (not a set type). If an item contains
+ * an AttributeValue
element of a different type than the one provided
+ * in the request, the value does not match. For example, {"S":"6"}
+ * does not equal {"N":"6"}
. Also, {"N":"6"}
does not
+ * compare to {"NS":["6", "2", "1"]}
.
+ * GT
: Greater than.
+ * AttributeValueList
can contain only one AttributeValue
+ * element of type String, Number, or Binary (not a set type). If an item contains
+ * an AttributeValue
element of a different type than the one provided
+ * in the request, the value does not match. For example, {"S":"6"}
+ * does not equal {"N":"6"}
. Also, {"N":"6"}
does not
+ * compare to {"NS":["6", "2", "1"]}
.
+ * NOT_NULL
: The attribute exists. NOT_NULL
is supported
+ * for all data types, including lists and maps.
This operator tests for the existence of an attribute, not its data type.
+ * If the data type of attribute "a
" is null, and you evaluate it
+ * using NOT_NULL
, the result is a Boolean true
. This
+ * result is because the attribute "a
" exists; its data type is
+ * not relevant to the NOT_NULL
comparison operator.
+ * NULL
: The attribute does not exist. NULL
is supported
+ * for all data types, including lists and maps.
This operator tests for the nonexistence of an attribute, not its data
+ * type. If the data type of attribute "a
" is null, and you
+ * evaluate it using NULL
, the result is a Boolean
+ * false
. This is because the attribute "a
"
+ * exists; its data type is not relevant to the NULL
comparison
+ * operator.
+ * CONTAINS
: Checks for a subsequence, or value in a set.
+ * AttributeValueList
can contain only one AttributeValue
+ * element of type String, Number, or Binary (not a set type). If the target
+ * attribute of the comparison is of type String, then the operator checks for a
+ * substring match. If the target attribute of the comparison is of type Binary,
+ * then the operator looks for a subsequence of the target that matches the input.
+ * If the target attribute of the comparison is a set ("SS
",
+ * "NS
", or "BS
"), then the operator evaluates to
+ * true if it finds an exact match with any member of the set.
CONTAINS is supported for lists: When evaluating "a CONTAINS b
",
+ * "a
" can be a list; however, "b
" cannot be a set, a
+ * map, or a list.
+ * NOT_CONTAINS
: Checks for absence of a subsequence, or absence of a
+ * value in a set.
+ * AttributeValueList
can contain only one AttributeValue
+ * element of type String, Number, or Binary (not a set type). If the target
+ * attribute of the comparison is a String, then the operator checks for the
+ * absence of a substring match. If the target attribute of the comparison is
+ * Binary, then the operator checks for the absence of a subsequence of the target
+ * that matches the input. If the target attribute of the comparison is a set
+ * ("SS
", "NS
", or "BS
"), then the
+ * operator evaluates to true if it does not find an exact
+ * match with any member of the set.
NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS
+ * b
", "a
" can be a list; however, "b
" cannot
+ * be a set, a map, or a list.
+ * BEGINS_WITH
: Checks for a prefix.
+ * AttributeValueList
can contain only one AttributeValue
+ * of type String or Binary (not a Number or a set type). The target attribute of
+ * the comparison must be of type String or Binary (not a Number or a set
+ * type).
+ * IN
: Checks for matching elements in a list.
+ * AttributeValueList
can contain one or more
+ * AttributeValue
elements of type String, Number, or Binary.
+ * These attributes are compared against an existing attribute of an item. If any
+ * elements of the input are equal to the item attribute, the expression evaluates
+ * to true.
+ * BETWEEN
: Greater than or equal to the first value, and less than
+ * or equal to the second value.
+ * AttributeValueList
must contain two AttributeValue
+ * elements of the same type, either String, Number, or Binary (not a set type). A
+ * target attribute matches if the target value is greater than, or equal to, the
+ * first element and less than, or equal to, the second element. If an item
+ * contains an AttributeValue
element of a different type than the one
+ * provided in the request, the value does not match. For example,
+ * {"S":"6"}
does not compare to {"N":"6"}
. Also,
+ * {"N":"6"}
does not compare to {"NS":["6", "2",
+ * "1"]}
+ *
One or more values to evaluate against the supplied attribute. The number of values in the
- * list depends on the ComparisonOperator
being used.
For type Number, value comparisons are numeric.
- *String value comparisons for greater than, equals, or less than are based on ASCII character
- * code values. For example, a
is greater than A
, and a
- * is greater than B
. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.
For Binary, DynamoDB treats each byte of the binary data as unsigned when it compares binary values.
- *For information on specifying data types in JSON, see JSON Data Format in the Amazon DynamoDB Developer Guide.
+ *One or more values to evaluate against the supplied attribute. The number of values in
+ * the list depends on the ComparisonOperator
being used.
For type Number, value comparisons are numeric.
+ *String value comparisons for greater than, equals, or less than are based on ASCII
+ * character code values. For example, a
is greater than A
, and
+ * a
is greater than B
. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters.
For Binary, DynamoDB treats each byte of the binary data as unsigned when it + * compares binary values.
+ *For information on specifying data types in JSON, see JSON Data Format + * in the Amazon DynamoDB Developer Guide.
*/ AttributeValueList?: AttributeValue[]; } @@ -8047,15 +8423,15 @@ export namespace ExpectedAttributeValue { export interface TransactGetItemsInput { /** - *An ordered array of up to 25 TransactGetItem
objects,
- * each of which contains a Get
structure.
An ordered array of up to 25 TransactGetItem
objects, each of which
+ * contains a Get
structure.
A value of TOTAL
causes consumed capacity information
- * to be returned, and a value of NONE
prevents that information
- * from being returned. No other value is valid.
A value of TOTAL
causes consumed capacity information to be returned, and
+ * a value of NONE
prevents that information from being returned. No other
+ * value is valid.
The capacity units consumed by the entire TransactWriteItems
- * operation. The values of the list are ordered according to
- * the ordering of the TransactItems
request parameter.
- *
The capacity units consumed by the entire TransactWriteItems
operation.
+ * The values of the list are ordered according to the ordering of the
+ * TransactItems
request parameter.
Represents a request to perform a check that an item exists or to check the condition of - * specific attributes of the item.
+ *Represents a request to perform a check that an item exists or to check the condition + * of specific attributes of the item.
*/ export interface ConditionCheck { /** - *The primary key of the item to be checked. Each element consists of an - * attribute name and a value for that attribute.
+ *The primary key of the item to be checked. Each element consists of an attribute name + * and a value for that attribute.
*/ Key: { [key: string]: AttributeValue } | undefined; @@ -8125,7 +8500,8 @@ export interface ConditionCheck { TableName: string | undefined; /** - *A condition that must be satisfied in order for a conditional update to succeed.
+ *A condition that must be satisfied in order for a conditional update to + * succeed.
*/ ConditionExpression: string | undefined; @@ -8140,10 +8516,10 @@ export interface ConditionCheck { ExpressionAttributeValues?: { [key: string]: AttributeValue }; /** - *Use ReturnValuesOnConditionCheckFailure
to
- * get the item attributes if the ConditionCheck
condition fails.
- * For ReturnValuesOnConditionCheckFailure
, the valid
- * values are: NONE and ALL_OLD.
Use ReturnValuesOnConditionCheckFailure
to get the item attributes if the
+ * ConditionCheck
condition fails. For
+ * ReturnValuesOnConditionCheckFailure
, the valid values are: NONE and
+ * ALL_OLD.
The primary key of the item to be deleted. Each element consists of an - * attribute name and a value for that attribute.
+ *The primary key of the item to be deleted. Each element consists of an attribute name + * and a value for that attribute.
*/ Key: { [key: string]: AttributeValue } | undefined; @@ -8191,7 +8567,8 @@ export interface Delete { TableName: string | undefined; /** - *A condition that must be satisfied in order for a conditional delete to succeed.
+ *A condition that must be satisfied in order for a conditional delete to + * succeed.
*/ ConditionExpression?: string; @@ -8206,10 +8583,10 @@ export interface Delete { ExpressionAttributeValues?: { [key: string]: AttributeValue }; /** - *Use ReturnValuesOnConditionCheckFailure
to
- * get the item attributes if the Delete
condition fails.
- * For ReturnValuesOnConditionCheckFailure
, the valid
- * values are: NONE and ALL_OLD.
Use ReturnValuesOnConditionCheckFailure
to get the item attributes if the
+ * Delete
condition fails. For
+ * ReturnValuesOnConditionCheckFailure
, the valid values are: NONE and
+ * ALL_OLD.
A map of attribute name to attribute values, representing the primary key
- * of the item to be written by PutItem
. All of the table's primary key
- * attributes must be specified, and their data types must match those of the table's
- * key schema. If any attributes are present in the item that are part of an index
- * key schema for the table, their types must match the index key schema.
A map of attribute name to attribute values, representing the primary key of the item
+ * to be written by PutItem
. All of the table's primary key attributes must be
+ * specified, and their data types must match those of the table's key schema. If any
+ * attributes are present in the item that are part of an index key schema for the table,
+ * their types must match the index key schema.
A condition that must be satisfied in order for a conditional update to succeed.
+ *A condition that must be satisfied in order for a conditional update to + * succeed.
*/ ConditionExpression?: string; @@ -8275,10 +8653,10 @@ export interface Put { ExpressionAttributeValues?: { [key: string]: AttributeValue }; /** - *Use ReturnValuesOnConditionCheckFailure
to
- * get the item attributes if the Put
condition fails.
- * For ReturnValuesOnConditionCheckFailure
, the valid
- * values are: NONE and ALL_OLD.
Use ReturnValuesOnConditionCheckFailure
to get the item attributes if the
+ * Put
condition fails. For
+ * ReturnValuesOnConditionCheckFailure
, the valid values are: NONE and
+ * ALL_OLD.
The primary key of the item to be updated. Each element consists of - * an attribute name and a value for that attribute.
+ *The primary key of the item to be updated. Each element consists of an attribute name + * and a value for that attribute.
*/ Key: { [key: string]: AttributeValue } | undefined; /** - *An expression that defines one or more attributes to be updated, - * the action to be performed on them, and new value(s) for them.
+ *An expression that defines one or more attributes to be updated, the action to be + * performed on them, and new value(s) for them.
*/ UpdateExpression: string | undefined; @@ -8333,7 +8711,7 @@ export interface Update { /** *A condition that must be satisfied in order for a conditional update to - * succeed.
+ * succeed. */ ConditionExpression?: string; @@ -8348,10 +8726,10 @@ export interface Update { ExpressionAttributeValues?: { [key: string]: AttributeValue }; /** - *Use ReturnValuesOnConditionCheckFailure
to
- * get the item attributes if the Update
condition fails.
- * For ReturnValuesOnConditionCheckFailure
, the valid
- * values are: NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW.
Use ReturnValuesOnConditionCheckFailure
to get the item attributes if the
+ * Update
condition fails. For
+ * ReturnValuesOnConditionCheckFailure
, the valid values are: NONE,
+ * ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW.
A map of attribute names to AttributeValue
objects, representing the item as it
- * appeared before the DeleteItem
operation. This map appears in the response only if
- * ReturnValues
was specified as ALL_OLD
in the request.
A map of attribute names to AttributeValue
objects, representing the item
+ * as it appeared before the DeleteItem
operation. This map appears in the
+ * response only if ReturnValues
was specified as ALL_OLD
in the
+ * request.
Information about item collections, if any, that were affected by the
- * DeleteItem
operation.
- * ItemCollectionMetrics
is only returned if the
- * ReturnItemCollectionMetrics
parameter was specified. If the
+ * DeleteItem
operation. ItemCollectionMetrics
is only
+ * returned if the ReturnItemCollectionMetrics
parameter was specified. If the
* table does not have any local secondary indexes, this information is not returned in the
* response.
Each ItemCollectionMetrics
- * element consists of:
Each ItemCollectionMetrics
element consists of:
- * ItemCollectionKey
- The partition key value of the item
- * collection. This is the same as the partition key value of the item itself.
+ * ItemCollectionKey
- The partition key value of the item collection.
+ * This is the same as the partition key value of the item itself.
- * SizeEstimateRangeGB
- An estimate of item collection size,
- * in gigabytes. This value is a two-element array
- * containing a lower bound and an upper bound for the
- * estimate. The estimate includes the size of all the
- * items in the table, plus the size of all attributes
- * projected into all of the local secondary indexes on that
- * table. Use this estimate to measure whether a local secondary index is approaching its size limit.
The estimate is subject to change over time; therefore, do not rely on the precision or accuracy of the estimate.
+ *
+ * SizeEstimateRangeGB
- An estimate of item collection size, in
+ * gigabytes. This value is a two-element array containing a lower bound and an
+ * upper bound for the estimate. The estimate includes the size of all the items in
+ * the table, plus the size of all attributes projected into all of the local
+ * secondary indexes on that table. Use this estimate to measure whether a local
+ * secondary index is approaching its size limit.
The estimate is subject to change over time; therefore, do not rely on the + * precision or accuracy of the estimate.
*The attribute values as they appeared before the PutItem
operation, but only if
- * ReturnValues
is specified as ALL_OLD
in the request. Each element
- * consists of an attribute name and an attribute value.
The attribute values as they appeared before the PutItem
operation, but
+ * only if ReturnValues
is specified as ALL_OLD
in the request.
+ * Each element consists of an attribute name and an attribute value.
Information about item collections, if any, that were affected by the
- * PutItem
operation. ItemCollectionMetrics
- * is only returned if the ReturnItemCollectionMetrics
parameter was specified. If the table does not have any local
- * secondary indexes, this information is not returned in the response.
Each ItemCollectionMetrics
- * element consists of:
- * ItemCollectionKey
- The partition key value of the item
- * collection. This is the same as the partition key value of the item itself.
- * SizeEstimateRangeGB
- An estimate of item collection size,
- * in gigabytes. This value is a two-element array
- * containing a lower bound and an upper bound for the
- * estimate. The estimate includes the size of all the
- * items in the table, plus the size of all attributes
- * projected into all of the local secondary indexes on that
- * table. Use this estimate to measure whether a local secondary index is approaching its size limit.
The estimate is subject to change over time; therefore, do not rely on the precision or accuracy of the estimate.
+ *PutItem
operation. ItemCollectionMetrics
is only returned
+ * if the ReturnItemCollectionMetrics
parameter was specified. If the table
+ * does not have any local secondary indexes, this information is not returned in the
+ * response.
+ * Each ItemCollectionMetrics
element consists of:
+ * ItemCollectionKey
- The partition key value of the item collection.
+ * This is the same as the partition key value of the item itself.
+ * SizeEstimateRangeGB
- An estimate of item collection size, in
+ * gigabytes. This value is a two-element array containing a lower bound and an
+ * upper bound for the estimate. The estimate includes the size of all the items in
+ * the table, plus the size of all attributes projected into all of the local
+ * secondary indexes on that table. Use this estimate to measure whether a local
+ * secondary index is approaching its size limit.
The estimate is subject to change over time; therefore, do not rely on the + * precision or accuracy of the estimate.
*An array of item attributes that match the query criteria. Each element in this array consists of an attribute name and the value for that attribute.
+ *An array of item attributes that match the query criteria. Each element in this array + * consists of an attribute name and the value for that attribute.
*/ Items?: { [key: string]: AttributeValue }[]; /** *The number of items in the response.
- *If you used a QueryFilter
in the request, then Count
is the number of items
- * returned after the filter was applied, and ScannedCount
is the number of
- * matching items before the filter was applied.
If you did not use a filter in the request, then Count
and ScannedCount
are the
- * same.
If you used a QueryFilter
in the request, then Count
is the
+ * number of items returned after the filter was applied, and ScannedCount
is
+ * the number of matching items before the filter was applied.
If you did not use a filter in the request, then Count
and
+ * ScannedCount
are the same.
   * <p>The number of items evaluated, before any <code>QueryFilter</code> is applied. A high
   *          <code>ScannedCount</code> value with few, or no, <code>Count</code> results
   *          indicates an inefficient <code>Query</code> operation. For more information, see Count
   *          and ScannedCount in the <i>Amazon DynamoDB Developer Guide</i>.</p>
   *          <p>If you did not use a filter in the request, then <code>ScannedCount</code> is the same
   *          as <code>Count</code>.</p>
   * <p>The primary key of the item where the operation stopped, inclusive of the previous
   *          result set. Use this value to start a new operation, excluding this value in the new
   *          request.</p>
   *          <p>If <code>LastEvaluatedKey</code> is empty, then the "last page" of results has been
   *          processed and there is no more data to be retrieved.</p>
   *          <p>If <code>LastEvaluatedKey</code> is not empty, it does not necessarily mean that there
   *          is more data in the result set. The only way to know when you have reached the end of
   *          the result set is when <code>LastEvaluatedKey</code> is empty.</p>
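Taken together, these rules give the standard pagination loop: keep feeding `LastEvaluatedKey` back as `ExclusiveStartKey` until it comes back empty. A sketch against this package's `QueryCommand` (table and key names are hypothetical):

```ts
import { AttributeValue, DynamoDBClient, QueryCommand } from "@aws-sdk/client-dynamodb";

const client = new DynamoDBClient({});

async function queryAllPages(artist: string) {
  const items: Record<string, AttributeValue>[] = [];
  let lastKey: Record<string, AttributeValue> | undefined;
  do {
    const page = await client.send(
      new QueryCommand({
        TableName: "Music", // hypothetical
        KeyConditionExpression: "Artist = :a",
        ExpressionAttributeValues: { ":a": { S: artist } },
        ExclusiveStartKey: lastKey,
      })
    );
    items.push(...(page.Items ?? []));
    lastKey = page.LastEvaluatedKey;
  } while (lastKey !== undefined); // an absent LastEvaluatedKey means the last page
  return items;
}
```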
   * <p>An array of item attributes that match the scan criteria. Each element in this array
   *          consists of an attribute name and the value for that attribute.</p>
   */
  Items?: { [key: string]: AttributeValue }[];
  /**
   * <p>The number of items in the response.</p>
   * <p>If you set <code>ScanFilter</code> in the request, then <code>Count</code> is the
   *          number of items returned after the filter was applied, and <code>ScannedCount</code> is
   *          the number of matching items before the filter was applied.</p>
   *          <p>If you did not use a filter in the request, then <code>Count</code> is the same as
   *          <code>ScannedCount</code>.</p>
   * <p>The number of items evaluated, before any <code>ScanFilter</code> is applied. A high
   *          <code>ScannedCount</code> value with few, or no, <code>Count</code> results
   *          indicates an inefficient <code>Scan</code> operation. For more information, see Count
   *          and ScannedCount in the <i>Amazon DynamoDB Developer Guide</i>.</p>
   *          <p>If you did not use a filter in the request, then <code>ScannedCount</code> is the same
   *          as <code>Count</code>.</p>
   * <p>The primary key of the item where the operation stopped, inclusive of the previous
   *          result set. Use this value to start a new operation, excluding this value in the new
   *          request.</p>
   *          <p>If <code>LastEvaluatedKey</code> is empty, then the "last page" of results has been
   *          processed and there is no more data to be retrieved.</p>
   *          <p>If <code>LastEvaluatedKey</code> is not empty, it does not necessarily mean that there
   *          is more data in the result set. The only way to know when you have reached the end of
   *          the result set is when <code>LastEvaluatedKey</code> is empty.</p>
   * <p>The capacity units consumed by the <code>Scan</code> operation. The data returned
   *          includes the total provisioned throughput consumed, along with statistics for the table
   *          and any indexes involved in the operation. <code>ConsumedCapacity</code> is only
   *          returned if the <code>ReturnConsumedCapacity</code> parameter was specified. For more
   *          information, see Provisioned Throughput in the <i>Amazon DynamoDB Developer
   *          Guide</i>.</p>
   * <p>The capacity units consumed by the <code>UpdateItem</code> operation. The data
   *          returned includes the total provisioned throughput consumed, along with statistics for
   *          the table and any indexes involved in the operation. <code>ConsumedCapacity</code> is
   *          only returned if the <code>ReturnConsumedCapacity</code> parameter was specified. For
   *          more information, see Provisioned Throughput in the <i>Amazon DynamoDB Developer
   *          Guide</i>.</p>
   * <p>Information about item collections, if any, that were affected by the
   *          <code>UpdateItem</code> operation. <code>ItemCollectionMetrics</code> is only
   *          returned if the <code>ReturnItemCollectionMetrics</code> parameter was specified. If the
   *          table does not have any local secondary indexes, this information is not returned in the
   *          response.</p>
   *          <p>Each <code>ItemCollectionMetrics</code> element consists of:</p>
   *          <ul>
   *             <li>
   *                <p><code>ItemCollectionKey</code> - The partition key value of the item collection.
   *                This is the same as the partition key value of the item itself.</p>
   *             </li>
   *             <li>
   *                <p><code>SizeEstimateRangeGB</code> - An estimate of item collection size, in
   *                gigabytes. This value is a two-element array containing a lower bound and an
   *                upper bound for the estimate. The estimate includes the size of all the items in
   *                the table, plus the size of all attributes projected into all of the local
   *                secondary indexes on that table. Use this estimate to measure whether a local
   *                secondary index is approaching its size limit.</p>
   *                <p>The estimate is subject to change over time; therefore, do not rely on the
   *                precision or accuracy of the estimate.</p>
   *             </li>
   *          </ul>
   * <p>Represents an operation to perform - either <code>DeleteItem</code> or
   *          <code>PutItem</code>. You can only request one of these operations, not both, in a
   *          single <code>WriteRequest</code>. If you do need to perform both of these operations,
   *          you need to provide two separate <code>WriteRequest</code> objects (see the
   *          BatchWriteItem sketch further below).</p>
*/
export interface WriteRequest {
/**
@@ -8791,52 +9177,56 @@ export namespace WriteRequest {
*/
export interface BatchGetItemOutput {
/**
   * <p>A map of table name to a list of items. Each object in <code>Responses</code> consists
   *          of a table name, along with a map of attribute data consisting of the data type and
   *          attribute value.</p>
   * <p>A map of tables and their respective keys that were not processed with the current
   *          response. The <code>UnprocessedKeys</code> value is in the same form as
   *          <code>RequestItems</code>, so the value can be provided directly to a subsequent
   *          <code>BatchGetItem</code> operation. For more information, see
   *          <code>RequestItems</code> in the Request Parameters section.</p>
   *          <p>Each element consists of:</p>
   *          <ul>
   *             <li>
   *                <p><code>Keys</code> - An array of primary key attribute values that define
   *                specific items in the table.</p>
   *             </li>
   *             <li>
   *                <p><code>ProjectionExpression</code> - One or more attributes to be retrieved from
   *                the table or index. By default, all attributes are returned. If a requested
   *                attribute is not found, it does not appear in the result.</p>
   *             </li>
   *             <li>
   *                <p><code>ConsistentRead</code> - The consistency of a read operation. If set to
   *                <code>true</code>, then a strongly consistent read is used; otherwise, an
   *                eventually consistent read is used.</p>
   *             </li>
   *          </ul>
   *          <p>If there are no unprocessed keys remaining, the response contains an empty
   *          <code>UnprocessedKeys</code> map.</p>
   * <p>The read capacity units consumed by the entire <code>BatchGetItem</code>
   *          operation.</p>
   *          <p>Each element consists of:</p>
   *          <ul>
   *             <li>
   *                <p><code>TableName</code> - The table that consumed the provisioned
   *                throughput.</p>
   *             </li>
   *             <li>
   *                <p><code>CapacityUnits</code> - The total number of capacity units consumed.</p>
   *             </li>
   *          </ul>
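Because `UnprocessedKeys` has the same shape as `RequestItems`, the retry pattern implied above is a simple feedback loop. A sketch (production code should add exponential backoff between attempts):

```ts
import {
  AttributeValue,
  BatchGetItemCommand,
  DynamoDBClient,
  KeysAndAttributes,
} from "@aws-sdk/client-dynamodb";

const client = new DynamoDBClient({});

async function batchGetAll(requestItems: Record<string, KeysAndAttributes>) {
  const collected: Record<string, Record<string, AttributeValue>[]> = {};
  let pending: Record<string, KeysAndAttributes> | undefined = requestItems;
  while (pending && Object.keys(pending).length > 0) {
    const out = await client.send(new BatchGetItemCommand({ RequestItems: pending }));
    for (const [table, items] of Object.entries(out.Responses ?? {})) {
      (collected[table] ??= []).push(...items);
    }
    pending = out.UnprocessedKeys; // same shape as RequestItems, resend directly
  }
  return collected;
}
```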
   * <p>The name of the table containing the requested items; or, if you provide
   *          <code>IndexName</code>, the name of the table to which that index belongs.</p>
   */
  TableName: string | undefined;
  /**
   * <p>The name of a secondary index to scan. This index can be any local secondary index or
   *          global secondary index. Note that if you use the <code>IndexName</code> parameter, you
   *          must also provide <code>TableName</code>.</p>
   * <p>This is a legacy parameter. Use <code>ProjectionExpression</code> instead. For more
   *          information, see AttributesToGet in the <i>Amazon DynamoDB Developer Guide</i>.</p>
   * <p>The maximum number of items to evaluate (not necessarily the number of matching
   *          items). If DynamoDB processes the number of items up to the limit while processing the
   *          results, it stops the operation and returns the matching values up to that point, and a
   *          key in <code>LastEvaluatedKey</code> to apply in a subsequent operation, so that you can
* pick up where you left off. Also, if the processed dataset size exceeds 1 MB before
* DynamoDB reaches this limit, it stops the operation and returns the matching values up
* to the limit, and a key in LastEvaluatedKey
to apply in a subsequent
@@ -8913,13 +9306,12 @@ export interface ScanInput {
Limit?: number;
/**
   * <p>The attributes to be returned in the result. You can retrieve all item attributes,
   *          specific item attributes, the count of matching items, or in the case of an index, some
   *          or all of the attributes projected into the index.</p>
   *          <ul>
* ALL_ATTRIBUTES
- Returns all of the item attributes from the
* specified table or index. If you query a local secondary index, then for each
* matching item in the index, DynamoDB fetches the entire item from the parent
@@ -8928,201 +9320,233 @@ export interface ScanInput {
* required.
   *             <li>
   *                <p><code>ALL_PROJECTED_ATTRIBUTES</code> - Allowed only when querying an index.
   *                Retrieves all attributes that have been projected into the index. If the index
   *                is configured to project all attributes, this return value is equivalent to
   *                specifying <code>ALL_ATTRIBUTES</code>.</p>
   *             </li>
   *             <li>
   *                <p><code>COUNT</code> - Returns the number of matching items, rather than the
   *                matching items themselves.</p>
   *             </li>
   *             <li>
   *                <p><code>SPECIFIC_ATTRIBUTES</code> - Returns only the attributes listed in
   *                <code>AttributesToGet</code>. This return value is equivalent to specifying
   *                <code>AttributesToGet</code> without specifying any value for
   *                <code>Select</code>.</p>
   *                <p>If you query or scan a local secondary index and request only attributes that
   *                are projected into that index, the operation reads only the index and not the
   *                table. If any of the requested attributes are not projected into the local
   *                secondary index, DynamoDB fetches each of these attributes from the parent
   *                table. This extra fetching incurs additional throughput cost and latency.</p>
   *                <p>If you query or scan a global secondary index, you can only request attributes
   *                that are projected into the index. Global secondary index queries cannot fetch
   *                attributes from the parent table.</p>
   *             </li>
   *          </ul>
   * <p>If neither <code>Select</code> nor <code>AttributesToGet</code> are specified,
   *          DynamoDB defaults to <code>ALL_ATTRIBUTES</code> when accessing a table, and
   *          <code>ALL_PROJECTED_ATTRIBUTES</code> when accessing an index. You cannot use both
   *          <code>Select</code> and <code>AttributesToGet</code> together in a single request,
   *          unless the value for <code>Select</code> is <code>SPECIFIC_ATTRIBUTES</code>. (This
   *          usage is equivalent to specifying <code>AttributesToGet</code> without any value for
   *          <code>Select</code>.)</p>
   *          <note>
   *             <p>If you use the <code>ProjectionExpression</code> parameter, then the value for
   *             <code>Select</code> can only be <code>SPECIFIC_ATTRIBUTES</code>. Any other
   *             value for <code>Select</code> will return an error.</p>
   *          </note>
   * <p>This is a legacy parameter. Use <code>FilterExpression</code> instead. For more
   *          information, see ScanFilter in the <i>Amazon DynamoDB Developer Guide</i>.</p>
   * <p>This is a legacy parameter. Use <code>FilterExpression</code> instead. For more
   *          information, see ConditionalOperator in the <i>Amazon DynamoDB Developer Guide</i>.</p>
   * <p>The primary key of the first item that this operation will evaluate. Use the value
   *          that was returned for <code>LastEvaluatedKey</code> in the previous operation.</p>
   *          <p>The data type for <code>ExclusiveStartKey</code> must be String, Number or Binary. No
   *          set data types are allowed.</p>
   *          <p>In a parallel scan, a <code>Scan</code> request that includes
   *          <code>ExclusiveStartKey</code> must specify the same segment whose previous
   *          <code>Scan</code> returned the corresponding value of
   *          <code>LastEvaluatedKey</code>.</p>
   * <p>Determines the level of detail about provisioned throughput consumption that is
   *          returned in the response:</p>
   *          <ul>
   *             <li>
   *                <p><code>INDEXES</code> - The response includes the aggregate
   *                <code>ConsumedCapacity</code> for the operation, together with
   *                <code>ConsumedCapacity</code> for each table and secondary index that was
   *                accessed.</p>
   *                <p>Note that some operations, such as <code>GetItem</code> and
   *                <code>BatchGetItem</code>, do not access any indexes at all. In these cases,
   *                specifying <code>INDEXES</code> will only return <code>ConsumedCapacity</code>
   *                information for table(s).</p>
   *             </li>
   *             <li>
   *                <p><code>TOTAL</code> - The response includes only the aggregate
   *                <code>ConsumedCapacity</code> for the operation.</p>
   *             </li>
   *             <li>
   *                <p><code>NONE</code> - No <code>ConsumedCapacity</code> details are included in the
   *                response.</p>
   *             </li>
   *          </ul>
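A short sketch of the three levels in practice (hypothetical table name):

```ts
import { DynamoDBClient, ScanCommand } from "@aws-sdk/client-dynamodb";

const client = new DynamoDBClient({});

async function scanWithCapacity() {
  const out = await client.send(
    new ScanCommand({
      TableName: "Orders", // hypothetical
      ReturnConsumedCapacity: "INDEXES",
    })
  );
  // With INDEXES, ConsumedCapacity carries per-table and per-index breakdowns;
  // with TOTAL it holds only the aggregate, and with NONE it is absent.
  console.log(out.ConsumedCapacity);
}
```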
   * <p>For a parallel <code>Scan</code> request, <code>TotalSegments</code> represents the
   *          total number of segments into which the <code>Scan</code> operation will be divided. The
   *          value of <code>TotalSegments</code> corresponds to the number of application workers
   *          that will perform the parallel scan. For example, if you want to use four application
   *          threads to scan a table or an index, specify a <code>TotalSegments</code> value of
   *          4.</p>
   *          <p>The value for <code>TotalSegments</code> must be greater than or equal to 1, and less
   *          than or equal to 1000000. If you specify a <code>TotalSegments</code> value of 1, the
   *          <code>Scan</code> operation will be sequential rather than parallel.</p>
   *          <p>If you specify <code>TotalSegments</code>, you must also specify
   *          <code>Segment</code>.</p>
   * <p>For a parallel <code>Scan</code> request, <code>Segment</code> identifies an
   *          individual segment to be scanned by an application worker.</p>
   *          <p>Segment IDs are zero-based, so the first segment is always 0. For example, if you want
   *          to use four application threads to scan a table or an index, then the first thread
   *          specifies a <code>Segment</code> value of 0, the second thread specifies 1, and so
   *          on.</p>
   *          <p>The value of <code>LastEvaluatedKey</code> returned from a parallel <code>Scan</code>
   *          request must be used as <code>ExclusiveStartKey</code> with the same segment ID in a
   *          subsequent <code>Scan</code> operation.</p>
   *          <p>The value for <code>Segment</code> must be greater than or equal to 0, and less than
   *          the value provided for <code>TotalSegments</code>.</p>
   *          <p>If you provide <code>Segment</code>, you must also provide
   *          <code>TotalSegments</code>.</p>
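Under these rules a parallel scan is just N concurrent paginated scans over disjoint segments. A sketch with a hypothetical table:

```ts
import { AttributeValue, DynamoDBClient, ScanCommand } from "@aws-sdk/client-dynamodb";

const client = new DynamoDBClient({});
const TOTAL_SEGMENTS = 4; // one segment per application worker

async function scanSegment(segment: number) {
  const items: Record<string, AttributeValue>[] = [];
  let lastKey: Record<string, AttributeValue> | undefined;
  do {
    const page = await client.send(
      new ScanCommand({
        TableName: "Orders", // hypothetical
        Segment: segment, // zero-based, must be < TotalSegments
        TotalSegments: TOTAL_SEGMENTS,
        ExclusiveStartKey: lastKey, // continuation stays on the same segment
      })
    );
    items.push(...(page.Items ?? []));
    lastKey = page.LastEvaluatedKey;
  } while (lastKey !== undefined);
  return items;
}

async function parallelScan() {
  const workers = Array.from({ length: TOTAL_SEGMENTS }, (_, s) => scanSegment(s));
  return (await Promise.all(workers)).flat();
}
```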
   * <p>A string that identifies one or more attributes to retrieve from the specified table
   *          or index. These attributes can include scalars, sets, or elements of a JSON document.
   *          The attributes in the expression must be separated by commas.</p>
   *          <p>If no attribute names are specified, then all attributes will be returned. If any of
   *          the requested attributes are not found, they will not appear in the result.</p>
   *          <p>For more information, see Specifying Item Attributes in the <i>Amazon DynamoDB
   *          Developer Guide</i>.</p>
   */
  ProjectionExpression?: string;
  /**
   * <p>A string that contains conditions that DynamoDB applies after the <code>Scan</code>
   *          operation, but before the data is returned to you. Items that do not satisfy the
   *          <code>FilterExpression</code> criteria are not returned.</p>
   *          <note>
   *             <p>A <code>FilterExpression</code> is applied after the items have already been read;
   *             the process of filtering does not consume any additional read capacity units.</p>
   *          </note>
   *          <p>For more information, see Filter Expressions in the <i>Amazon DynamoDB Developer
   *          Guide</i>.</p>
   */
  FilterExpression?: string;
  /**
   * <p>One or more substitution tokens for attribute names in an expression. The following
   *          are some use cases for using <code>ExpressionAttributeNames</code>:</p>
   *          <ul>
   *             <li>
   *                <p>To access an attribute whose name conflicts with a DynamoDB reserved
   *                word.</p>
   *             </li>
   *             <li>
   *                <p>To create a placeholder for repeating occurrences of an attribute name in an
   *                expression.</p>
   *             </li>
   *             <li>
   *                <p>To prevent special characters in an attribute name from being misinterpreted
   *                in an expression.</p>
   *             </li>
   *          </ul>
   *          <p>Use the <b>#</b> character in an expression to dereference
   *          an attribute name. For example, consider the following attribute name:</p>
   *          <ul>
   *             <li>
   *                <p><code>Percentile</code></p>
   *             </li>
   *          </ul>
   *          <p>The name of this attribute conflicts with a reserved word, so it cannot be used
   *          directly in an expression. (For the complete list of reserved words, see Reserved Words
   *          in the <i>Amazon DynamoDB Developer Guide</i>). To work around this, you could specify
   *          the following for <code>ExpressionAttributeNames</code>:</p>
   *          <ul>
   *             <li>
   *                <p><code>{"#P":"Percentile"}</code></p>
   *             </li>
   *          </ul>
   *          <p>You could then use this substitution in an expression, as in this example:</p>
   *          <ul>
   *             <li>
   *                <p><code>#P = :val</code></p>
   *             </li>
   *          </ul>
   *          <note>
   *             <p>Tokens that begin with the <b>:</b> character are
   *             <i>expression attribute values</i>, which are placeholders for the
   *             actual value at runtime.</p>
   *          </note>
   *          <p>For more information on expression attribute names, see Specifying Item Attributes in
   *          the <i>Amazon DynamoDB Developer Guide</i>.</p>
   */
  ExpressionAttributeNames?: { [key: string]: string };
  /**
   * <p>One or more values that can be substituted in an expression.</p>
   *          <p>Use the <b>:</b> (colon) character in an expression to
   *          dereference an attribute value. For example, suppose that you wanted to check whether
   *          the value of the <code>ProductStatus</code> attribute was one of the following:</p>
   *          <p><code>Available | Backordered | Discontinued</code></p>
   *          <p>You would first need to specify <code>ExpressionAttributeValues</code> as
   *          follows:</p>
   *          <p><code>{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
   *          ":disc":{"S":"Discontinued"} }</code></p>
   *          <p>You could then use these values in an expression, such as this:</p>
   *          <p><code>ProductStatus IN (:avail, :back, :disc)</code></p>
   *          <p>For more information on expression attribute values, see Condition Expressions in the
   *          <i>Amazon DynamoDB Developer Guide</i>.</p>
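Putting the # and : substitution rules together in one request — a sketch reusing the documented Percentile/ProductStatus examples against a hypothetical table:

```ts
import { DynamoDBClient, ScanCommand } from "@aws-sdk/client-dynamodb";

const client = new DynamoDBClient({});

async function scanWithSubstitutions() {
  return client.send(
    new ScanCommand({
      TableName: "ProductCatalog", // hypothetical
      // "#P" dereferences the reserved attribute name "Percentile";
      // ":val" and ":avail"/":back"/":disc" are bound at runtime.
      FilterExpression: "#P >= :val AND ProductStatus IN (:avail, :back, :disc)",
      ExpressionAttributeNames: { "#P": "Percentile" },
      ExpressionAttributeValues: {
        ":val": { N: "90" },
        ":avail": { S: "Available" },
        ":back": { S: "Backordered" },
        ":disc": { S: "Discontinued" },
      },
    })
  );
}
```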
   */
@@ -9130,19 +9554,23 @@ export interface ScanInput {
  /**
   * <p>A Boolean value that determines the read consistency model during the scan:</p>
   *          <ul>
   *             <li>
   *                <p>If <code>ConsistentRead</code> is <code>false</code>, then the data returned
   *                from <code>Scan</code> might not contain the results from other recently
   *                completed write operations (<code>PutItem</code>, <code>UpdateItem</code>, or
   *                <code>DeleteItem</code>).</p>
   *             </li>
   *             <li>
   *                <p>If <code>ConsistentRead</code> is <code>true</code>, then all of the write
   *                operations that completed before the <code>Scan</code> began are guaranteed to
   *                be contained in the <code>Scan</code> response.</p>
   *             </li>
   *          </ul>
   *          <p>The default setting for <code>ConsistentRead</code> is <code>false</code>.</p>
   *          <p>The <code>ConsistentRead</code> parameter is not supported on global secondary
   *          indexes. If you scan a global secondary index with <code>ConsistentRead</code> set to
   *          true, you will receive a <code>ValidationException</code>.</p>
   * <p>A map of one or more table names and, for each table, a list of operations to be
   *          performed (<code>DeleteRequest</code> or <code>PutRequest</code>). Each element in the
   *          map consists of the following:</p>
   *          <ul>
   *             <li>
   *                <p><code>DeleteRequest</code> - Perform a <code>DeleteItem</code> operation on the
   *                specified item. The item to be deleted is identified by a <code>Key</code>
   *                subelement:</p>
   *                <ul>
   *                   <li>
   *                      <p><code>Key</code> - A map of primary key attribute values that uniquely
   *                      identify the item. Each entry in this map consists of an attribute name
   *                      and an attribute value. For each primary key, you must provide
   *                      <i>all</i> of the key attributes. For example, with a
   *                      simple primary key, you only need to provide a value for the partition
   *                      key. For a composite primary key, you must provide values for
   *                      <i>both</i> the partition key and the sort key.</p>
   *                   </li>
   *                </ul>
   *             </li>
   *             <li>
   *                <p><code>PutRequest</code> - Perform a <code>PutItem</code> operation on the
   *                specified item. The item to be put is identified by an <code>Item</code>
   *                subelement:</p>
   *                <ul>
   *                   <li>
   *                      <p><code>Item</code> - A map of attributes and their values. Each entry in
   *                      this map consists of an attribute name and an attribute value. Attribute
   *                      values must not be null; string and binary type attributes must have
   *                      lengths greater than zero; and set type attributes must not be empty.
   *                      Requests that contain empty values are rejected with a
   *                      <code>ValidationException</code> exception.</p>
   *                      <p>If you specify any attributes that are part of an index key, then the
   *                      data types for those attributes must match those of the schema in the
   *                      table's attribute definition.</p>
   *                   </li>
   *                </ul>
   *             </li>
   *          </ul>
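The `RequestItems` shape above, together with the earlier one-operation-per-`WriteRequest` rule, looks like this in practice (sketch; table and keys are hypothetical):

```ts
import { BatchWriteItemCommand, DynamoDBClient } from "@aws-sdk/client-dynamodb";

const client = new DynamoDBClient({});

async function batchWrite() {
  await client.send(
    new BatchWriteItemCommand({
      RequestItems: {
        // Hypothetical table; a put and a delete require two separate
        // WriteRequest elements, never one element carrying both.
        Forum: [
          {
            PutRequest: {
              Item: {
                Name: { S: "Amazon DynamoDB" },
                Category: { S: "Amazon Web Services" },
              },
            },
          },
          { DeleteRequest: { Key: { Name: { S: "Amazon RDS" } } } },
        ],
      },
    })
  );
}
```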
   * <p>Determines the level of detail about provisioned throughput consumption that is
   *          returned in the response:</p>
   *          <ul>
   *             <li>
   *                <p><code>INDEXES</code> - The response includes the aggregate
   *                <code>ConsumedCapacity</code> for the operation, together with
   *                <code>ConsumedCapacity</code> for each table and secondary index that was
   *                accessed.</p>
   *                <p>Note that some operations, such as <code>GetItem</code> and
   *                <code>BatchGetItem</code>, do not access any indexes at all. In these cases,
   *                specifying <code>INDEXES</code> will only return <code>ConsumedCapacity</code>
   *                information for table(s).</p>
   *             </li>
   *             <li>
   *                <p><code>TOTAL</code> - The response includes only the aggregate
   *                <code>ConsumedCapacity</code> for the operation.</p>
   *             </li>
   *             <li>
   *                <p><code>NONE</code> - No <code>ConsumedCapacity</code> details are included in the
   *                response.</p>
   *             </li>
   *          </ul>
   * <p>Determines whether item collection metrics are returned. If set to <code>SIZE</code>,
   *          the response includes statistics about item collections, if any, that were modified
   *          during the operation. If set to <code>NONE</code> (the default), no statistics are
   *          returned.</p>
   * <p>A map of attribute names to <code>AttributeValue</code> objects, representing the
   *          primary key of the item to delete.</p>
   *          <p>For the primary key, you must provide all of the attributes. For example, with a
   *          simple primary key, you only need to provide a value for the partition key. For a
   *          composite primary key, you must provide values for both the partition key and the sort
   *          key.</p>
   */
  Key: { [key: string]: AttributeValue } | undefined;
  /**
   * <p>This is a legacy parameter. Use <code>ConditionExpression</code> instead. For more
   *          information, see Expected in the <i>Amazon DynamoDB Developer Guide</i>.</p>
   * <p>This is a legacy parameter. Use <code>ConditionExpression</code> instead. For more
   *          information, see ConditionalOperator in the <i>Amazon DynamoDB Developer Guide</i>.</p>
   * <p>Use <code>ReturnValues</code> if you want to get the item attributes as they appeared
   *          before they were deleted. For <code>DeleteItem</code>, the valid values are:</p>
   *          <ul>
   *             <li>
   *                <p><code>NONE</code> - If <code>ReturnValues</code> is not specified, or if its
   *                value is <code>NONE</code>, then nothing is returned. (This setting is the
   *                default for <code>ReturnValues</code>.)</p>
   *             </li>
   *             <li>
   *                <p><code>ALL_OLD</code> - The content of the old item is returned.</p>
   *             </li>
   *          </ul>
   *          <note>
   *             <p>The <code>ReturnValues</code> parameter is used by several DynamoDB operations;
   *             however, <code>DeleteItem</code> does not recognize any values other than
   *             <code>NONE</code> or <code>ALL_OLD</code>.</p>
   *          </note>
   * <p>Determines the level of detail about provisioned throughput consumption that is
   *          returned in the response:</p>
   *          <ul>
   *             <li>
   *                <p><code>INDEXES</code> - The response includes the aggregate
   *                <code>ConsumedCapacity</code> for the operation, together with
   *                <code>ConsumedCapacity</code> for each table and secondary index that was
   *                accessed.</p>
   *                <p>Note that some operations, such as <code>GetItem</code> and
   *                <code>BatchGetItem</code>, do not access any indexes at all. In these cases,
   *                specifying <code>INDEXES</code> will only return <code>ConsumedCapacity</code>
   *                information for table(s).</p>
   *             </li>
   *             <li>
   *                <p><code>TOTAL</code> - The response includes only the aggregate
   *                <code>ConsumedCapacity</code> for the operation.</p>
   *             </li>
   *             <li>
   *                <p><code>NONE</code> - No <code>ConsumedCapacity</code> details are included in the
   *                response.</p>
   *             </li>
   *          </ul>
   * <p>Determines whether item collection metrics are returned. If set to <code>SIZE</code>,
   *          the response includes statistics about item collections, if any, that were modified
   *          during the operation. If set to <code>NONE</code> (the default), no statistics are
   *          returned.</p>
   * <p>A condition that must be satisfied in order for a conditional <code>DeleteItem</code>
   *          to succeed.</p>
   *          <p>An expression can contain any of the following:</p>
   *          <ul>
   *             <li>
   *                <p>Functions: <code>attribute_exists | attribute_not_exists | attribute_type |
   *                contains | begins_with | size</code></p>
   *                <p>These function names are case-sensitive.</p>
   *             </li>
   *             <li>
   *                <p>Comparison operators: <code>= | <> | < | > | <= | >= |
   *                BETWEEN | IN</code></p>
   *             </li>
   *             <li>
   *                <p>Logical operators: <code>AND | OR | NOT</code></p>
   *             </li>
   *          </ul>
   *          <p>For more information about condition expressions, see Condition Expressions in the
   *          <i>Amazon DynamoDB Developer Guide</i>.</p>
   */
  ConditionExpression?: string;
  /**
   * <p>One or more substitution tokens for attribute names in an expression. The following
   *          are some use cases for using <code>ExpressionAttributeNames</code>:</p>
   *          <ul>
   *             <li>
   *                <p>To access an attribute whose name conflicts with a DynamoDB reserved
   *                word.</p>
   *             </li>
   *             <li>
   *                <p>To create a placeholder for repeating occurrences of an attribute name in an
   *                expression.</p>
   *             </li>
   *             <li>
   *                <p>To prevent special characters in an attribute name from being misinterpreted
   *                in an expression.</p>
   *             </li>
   *          </ul>
   *          <p>Use the <b>#</b> character in an expression to dereference
   *          an attribute name. For example, consider the following attribute name:</p>
   *          <ul>
   *             <li>
   *                <p><code>Percentile</code></p>
   *             </li>
   *          </ul>
   *          <p>The name of this attribute conflicts with a reserved word, so it cannot be used
   *          directly in an expression. (For the complete list of reserved words, see Reserved Words
   *          in the <i>Amazon DynamoDB Developer Guide</i>). To work around this, you could specify
   *          the following for <code>ExpressionAttributeNames</code>:</p>
   *          <ul>
   *             <li>
   *                <p><code>{"#P":"Percentile"}</code></p>
   *             </li>
   *          </ul>
   *          <p>You could then use this substitution in an expression, as in this example:</p>
   *          <ul>
   *             <li>
   *                <p><code>#P = :val</code></p>
   *             </li>
   *          </ul>
   *          <note>
   *             <p>Tokens that begin with the <b>:</b> character are
   *             <i>expression attribute values</i>, which are placeholders for the
   *             actual value at runtime.</p>
   *          </note>
   *          <p>For more information on expression attribute names, see Specifying Item Attributes in
   *          the <i>Amazon DynamoDB Developer Guide</i>.</p>
   */
  ExpressionAttributeNames?: { [key: string]: string };
  /**
   * <p>One or more values that can be substituted in an expression.</p>
   *          <p>Use the <b>:</b> (colon) character in an expression to
   *          dereference an attribute value. For example, suppose that you wanted to check whether
   *          the value of the <code>ProductStatus</code> attribute was one of the following:</p>
   *          <p><code>Available | Backordered | Discontinued</code></p>
   *          <p>You would first need to specify <code>ExpressionAttributeValues</code> as
   *          follows:</p>
   *          <p><code>{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
   *          ":disc":{"S":"Discontinued"} }</code></p>
   *          <p>You could then use these values in an expression, such as this:</p>
   *          <p><code>ProductStatus IN (:avail, :back, :disc)</code></p>
   *          <p>For more information on expression attribute values, see Condition Expressions in the
   *          <i>Amazon DynamoDB Developer Guide</i>.</p>
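Combining the condition-expression operators with the substitution tokens, a conditional delete might look like this (sketch; table, key, and attribute names are hypothetical):

```ts
import {
  ConditionalCheckFailedException,
  DeleteItemCommand,
  DynamoDBClient,
} from "@aws-sdk/client-dynamodb";

const client = new DynamoDBClient({});

async function conditionalDelete() {
  try {
    await client.send(
      new DeleteItemCommand({
        TableName: "Thread", // hypothetical
        Key: { ForumName: { S: "Amazon DynamoDB" }, Subject: { S: "Stale thread" } },
        // attribute_exists() and <= come from the operator lists above.
        ConditionExpression: "attribute_exists(Replies) AND Views <= :v",
        ExpressionAttributeValues: { ":v": { N: "0" } },
      })
    );
  } catch (e) {
    if (e instanceof ConditionalCheckFailedException) {
      return false; // condition not met, nothing deleted
    }
    throw e;
  }
  return true;
}
```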
   */
@@ -9487,164 +9963,204 @@ export interface PutItemInput {
  TableName: string | undefined;
  /**
   * <p>A map of attribute name/value pairs, one for each attribute. Only the primary key
   *          attributes are required; you can optionally provide other attribute name-value pairs for
   *          the item.</p>
   *          <p>You must provide all of the attributes for the primary key. For example, with a simple
   *          primary key, you only need to provide a value for the partition key. For a composite
   *          primary key, you must provide both values for both the partition key and the sort
   *          key.</p>
   *          <p>If you specify any attributes that are part of an index key, then the data types for
   *          those attributes must match those of the schema in the table's attribute
   *          definition.</p>
   *          <p>Empty String and Binary attribute values are allowed. Attribute values of type String
   *          and Binary must have a length greater than zero if the attribute is used as a key
   *          attribute for a table or index.</p>
   *          <p>For more information about primary keys, see Primary Key in the <i>Amazon DynamoDB
   *          Developer Guide</i>.</p>
   *          <p>Each element in the <code>Item</code> map is an <code>AttributeValue</code>
   *          object.</p>
   * <p>This is a legacy parameter. Use <code>ConditionExpression</code> instead. For more
   *          information, see Expected in the <i>Amazon DynamoDB Developer Guide</i>.</p>
   * <p>Use <code>ReturnValues</code> if you want to get the item attributes as they appeared
   *          before they were updated with the <code>PutItem</code> request. For
   *          <code>PutItem</code>, the valid values are:</p>
   *          <ul>
   *             <li>
   *                <p><code>NONE</code> - If <code>ReturnValues</code> is not specified, or if its
   *                value is <code>NONE</code>, then nothing is returned. (This setting is the
   *                default for <code>ReturnValues</code>.)</p>
   *             </li>
   *             <li>
   *                <p><code>ALL_OLD</code> - If <code>PutItem</code> overwrote an attribute name-value
   *                pair, then the content of the old item is returned.</p>
   *             </li>
   *          </ul>
   *          <p>The values returned are strongly consistent.</p>
   *          <note>
   *             <p>The <code>ReturnValues</code> parameter is used by several DynamoDB operations;
   *             however, <code>PutItem</code> does not recognize any values other than
   *             <code>NONE</code> or <code>ALL_OLD</code>.</p>
   *          </note>
   * <p>Determines the level of detail about provisioned throughput consumption that is
   *          returned in the response:</p>
   *          <ul>
   *             <li>
   *                <p><code>INDEXES</code> - The response includes the aggregate
   *                <code>ConsumedCapacity</code> for the operation, together with
   *                <code>ConsumedCapacity</code> for each table and secondary index that was
   *                accessed.</p>
   *                <p>Note that some operations, such as <code>GetItem</code> and
   *                <code>BatchGetItem</code>, do not access any indexes at all. In these cases,
   *                specifying <code>INDEXES</code> will only return <code>ConsumedCapacity</code>
   *                information for table(s).</p>
   *             </li>
   *             <li>
   *                <p><code>TOTAL</code> - The response includes only the aggregate
   *                <code>ConsumedCapacity</code> for the operation.</p>
   *             </li>
   *             <li>
   *                <p><code>NONE</code> - No <code>ConsumedCapacity</code> details are included in the
   *                response.</p>
   *             </li>
   *          </ul>
   * <p>Determines whether item collection metrics are returned. If set to <code>SIZE</code>,
   *          the response includes statistics about item collections, if any, that were modified
   *          during the operation. If set to <code>NONE</code> (the default), no statistics are
   *          returned.</p>
   * <p>This is a legacy parameter. Use <code>ConditionExpression</code> instead. For more
   *          information, see ConditionalOperator in the <i>Amazon DynamoDB Developer Guide</i>.</p>
   * <p>A condition that must be satisfied in order for a conditional <code>PutItem</code>
   *          operation to succeed.</p>
   *          <p>An expression can contain any of the following:</p>
   *          <ul>
   *             <li>
   *                <p>Functions: <code>attribute_exists | attribute_not_exists | attribute_type |
   *                contains | begins_with | size</code></p>
   *                <p>These function names are case-sensitive.</p>
   *             </li>
   *             <li>
   *                <p>Comparison operators: <code>= | <> | < | > | <= | >= |
   *                BETWEEN | IN</code></p>
   *             </li>
   *             <li>
   *                <p>Logical operators: <code>AND | OR | NOT</code></p>
   *             </li>
   *          </ul>
   *          <p>For more information on condition expressions, see Condition Expressions in the
   *          <i>Amazon DynamoDB Developer Guide</i>.</p>
   */
  ConditionExpression?: string;
  /**
   * <p>One or more substitution tokens for attribute names in an expression. The following
   *          are some use cases for using <code>ExpressionAttributeNames</code>:</p>
   *          <ul>
   *             <li>
   *                <p>To access an attribute whose name conflicts with a DynamoDB reserved
   *                word.</p>
   *             </li>
   *             <li>
   *                <p>To create a placeholder for repeating occurrences of an attribute name in an
   *                expression.</p>
   *             </li>
   *             <li>
   *                <p>To prevent special characters in an attribute name from being misinterpreted
   *                in an expression.</p>
   *             </li>
   *          </ul>
   *          <p>Use the <b>#</b> character in an expression to dereference
   *          an attribute name. For example, consider the following attribute name:</p>
   *          <ul>
   *             <li>
   *                <p><code>Percentile</code></p>
   *             </li>
   *          </ul>
   *          <p>The name of this attribute conflicts with a reserved word, so it cannot be used
   *          directly in an expression. (For the complete list of reserved words, see Reserved Words
   *          in the <i>Amazon DynamoDB Developer Guide</i>). To work around this, you could specify
   *          the following for <code>ExpressionAttributeNames</code>:</p>
   *          <ul>
   *             <li>
   *                <p><code>{"#P":"Percentile"}</code></p>
   *             </li>
   *          </ul>
   *          <p>You could then use this substitution in an expression, as in this example:</p>
   *          <ul>
   *             <li>
   *                <p><code>#P = :val</code></p>
   *             </li>
   *          </ul>
   *          <note>
   *             <p>Tokens that begin with the <b>:</b> character are
   *             <i>expression attribute values</i>, which are placeholders for the
   *             actual value at runtime.</p>
   *          </note>
   *          <p>For more information on expression attribute names, see Specifying Item Attributes in
   *          the <i>Amazon DynamoDB Developer Guide</i>.</p>
   */
  ExpressionAttributeNames?: { [key: string]: string };
  /**
   * <p>One or more values that can be substituted in an expression.</p>
   *          <p>Use the <b>:</b> (colon) character in an expression to
   *          dereference an attribute value. For example, suppose that you wanted to check whether
   *          the value of the <code>ProductStatus</code> attribute was one of the following:</p>
   *          <p><code>Available | Backordered | Discontinued</code></p>
   *          <p>You would first need to specify <code>ExpressionAttributeValues</code> as
   *          follows:</p>
   *          <p><code>{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
   *          ":disc":{"S":"Discontinued"} }</code></p>
   *          <p>You could then use these values in an expression, such as this:</p>
   *          <p><code>ProductStatus IN (:avail, :back, :disc)</code></p>
   *          <p>For more information on expression attribute values, see Condition Expressions in the
   *          <i>Amazon DynamoDB Developer Guide</i>.</p>
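For `PutItem`, the classic use of these pieces is the "insert only if absent" guard (sketch; table and attribute names are hypothetical):

```ts
import { DynamoDBClient, PutItemCommand } from "@aws-sdk/client-dynamodb";

const client = new DynamoDBClient({});

// Fails with ConditionalCheckFailedException if an item with this
// partition key already exists, so an existing item is never overwritten.
async function createUser(id: string) {
  await client.send(
    new PutItemCommand({
      TableName: "Users", // hypothetical
      Item: { Id: { S: id }, CreatedAt: { N: Date.now().toString() } },
      ConditionExpression: "attribute_not_exists(Id)",
    })
  );
}
```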
   */
@@ -9697,20 +10213,20 @@ export interface QueryInput {
  TableName: string | undefined;
  /**
   * <p>The name of an index to query. This index can be any local secondary index or global
   *          secondary index on the table. Note that if you use the <code>IndexName</code> parameter,
   *          you must also provide <code>TableName</code>.</p>
   */
  IndexName?: string;
  /**
   * <p>The attributes to be returned in the result. You can retrieve all item attributes,
   *          specific item attributes, the count of matching items, or in the case of an index, some
   *          or all of the attributes projected into the index.</p>
   *          <ul>
* ALL_ATTRIBUTES
- Returns all of the item attributes from the
* specified table or index. If you query a local secondary index, then for each
* matching item in the index, DynamoDB fetches the entire item from the parent
@@ -9719,61 +10235,60 @@ export interface QueryInput {
* required.
   *             <li>
   *                <p><code>ALL_PROJECTED_ATTRIBUTES</code> - Allowed only when querying an index.
   *                Retrieves all attributes that have been projected into the index. If the index
   *                is configured to project all attributes, this return value is equivalent to
   *                specifying <code>ALL_ATTRIBUTES</code>.</p>
   *             </li>
   *             <li>
   *                <p><code>COUNT</code> - Returns the number of matching items, rather than the
   *                matching items themselves.</p>
   *             </li>
   *             <li>
   *                <p><code>SPECIFIC_ATTRIBUTES</code> - Returns only the attributes listed in
   *                <code>AttributesToGet</code>. This return value is equivalent to specifying
   *                <code>AttributesToGet</code> without specifying any value for
   *                <code>Select</code>.</p>
   *                <p>If you query or scan a local secondary index and request only attributes that
   *                are projected into that index, the operation will read only the index and not
   *                the table. If any of the requested attributes are not projected into the local
   *                secondary index, DynamoDB fetches each of these attributes from the parent
   *                table. This extra fetching incurs additional throughput cost and latency.</p>
   *                <p>If you query or scan a global secondary index, you can only request attributes
   *                that are projected into the index. Global secondary index queries cannot fetch
   *                attributes from the parent table.</p>
   *             </li>
   *          </ul>
nor AttributesToGet
- * are specified, DynamoDB defaults to ALL_ATTRIBUTES
when accessing a
- * table, and ALL_PROJECTED_ATTRIBUTES
when accessing an index. You cannot
- * use both Select
and AttributesToGet
- * together in a single request, unless the value for Select
is
- * SPECIFIC_ATTRIBUTES
. (This usage is equivalent to specifying
- * AttributesToGet
without any value for
- * Select
.)
If you use the ProjectionExpression
parameter, then
- * the value for Select
can only be
- * SPECIFIC_ATTRIBUTES
. Any other value for
- * Select
will return an error.
If neither Select
nor AttributesToGet
are specified,
+ * DynamoDB defaults to ALL_ATTRIBUTES
when accessing a table, and
+ * ALL_PROJECTED_ATTRIBUTES
when accessing an index. You cannot use both
+ * Select
and AttributesToGet
together in a single request,
+ * unless the value for Select
is SPECIFIC_ATTRIBUTES
. (This
+ * usage is equivalent to specifying AttributesToGet
without any value for
+ * Select
.)
If you use the ProjectionExpression
parameter, then the value for
+ * Select
can only be SPECIFIC_ATTRIBUTES
. Any other
+ * value for Select
will return an error.
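For illustration only (not part of the diff): a minimal sketch of a Query that follows the Select rules documented above. The table and index names are assumed, not taken from this changeset.

  import { DynamoDBClient, QueryCommand } from "@aws-sdk/client-dynamodb";

  const ddb = new DynamoDBClient({});

  async function queryProjected() {
    // ALL_PROJECTED_ATTRIBUTES is only valid when an IndexName is given;
    // Select may only be combined with AttributesToGet when it is SPECIFIC_ATTRIBUTES.
    return ddb.send(
      new QueryCommand({
        TableName: "Music",                 // assumed table name
        IndexName: "AlbumTitleIndex",       // assumed GSI name
        Select: "ALL_PROJECTED_ATTRIBUTES",
        KeyConditionExpression: "AlbumTitle = :t",
        ExpressionAttributeValues: { ":t": { S: "Somewhat Famous" } },
      })
    );
  }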
-   * This is a legacy parameter. Use ProjectionExpression instead. For more information, see
-   * AttributesToGet in the Amazon DynamoDB Developer Guide.
+   * This is a legacy parameter. Use ProjectionExpression instead. For more
+   * information, see AttributesToGet in the Amazon DynamoDB Developer
+   * Guide.
-   * The maximum number of items to evaluate (not necessarily the number of matching items).
-   * If DynamoDB processes the number of items up to the limit while processing the results,
-   * it stops the operation and returns the matching values up to that point, and a key in
-   * LastEvaluatedKey to apply in a subsequent operation, so that you can
+   * The maximum number of items to evaluate (not necessarily the number of matching
+   * items). If DynamoDB processes the number of items up to the limit while processing the
+   * results, it stops the operation and returns the matching values up to that point, and a
+   * key in LastEvaluatedKey to apply in a subsequent operation, so that you can
    * pick up where you left off. Also, if the processed dataset size exceeds 1 MB before
    * DynamoDB reaches this limit, it stops the operation and returns the matching values up
    * to the limit, and a key in LastEvaluatedKey to apply in a subsequent
@@ -9783,84 +10298,112 @@ export interface QueryInput {
Limit?: number;
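Not part of the diff: a small sketch of the LastEvaluatedKey pagination loop described above, against an assumed table.

  import {
    AttributeValue,
    DynamoDBClient,
    QueryCommand,
    QueryCommandInput,
  } from "@aws-sdk/client-dynamodb";

  const ddb = new DynamoDBClient({});

  // Drain every page of a Query by feeding LastEvaluatedKey back in as
  // ExclusiveStartKey until DynamoDB stops returning one.
  async function queryAllPages(input: QueryCommandInput) {
    const items: Record<string, AttributeValue>[] = [];
    let startKey: QueryCommandInput["ExclusiveStartKey"];
    do {
      const page = await ddb.send(new QueryCommand({ ...input, ExclusiveStartKey: startKey }));
      items.push(...(page.Items ?? []));
      startKey = page.LastEvaluatedKey;
    } while (startKey);
    return items;
  }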
/**
-   * Determines the read consistency model: If set to true, then the operation uses strongly consistent reads; otherwise, the operation uses eventually consistent reads.
-   * Strongly consistent reads
-   * are not supported on global secondary indexes. If you query a global secondary index with ConsistentRead set to
-   * true, you will receive a ValidationException.
+   * Determines the read consistency model: If set to true, then the operation
+   * uses strongly consistent reads; otherwise, the operation uses eventually consistent
+   * reads.
+   * Strongly consistent reads are not supported on global secondary indexes. If you query
+   * a global secondary index with ConsistentRead set to true, you
+   * will receive a ValidationException.
-   * This is a legacy parameter. Use KeyConditionExpression instead. For more information, see
-   * KeyConditions in the Amazon DynamoDB Developer Guide.
+   * This is a legacy parameter. Use KeyConditionExpression instead. For more
+   * information, see KeyConditions in the Amazon DynamoDB Developer
+   * Guide.

-   * This is a legacy parameter. Use FilterExpression instead. For more information, see
-   * QueryFilter in the Amazon DynamoDB Developer Guide.
+   * This is a legacy parameter. Use FilterExpression instead. For more
+   * information, see QueryFilter in the Amazon DynamoDB Developer
+   * Guide.

-   * This is a legacy parameter. Use FilterExpression instead. For more information, see
-   * ConditionalOperator in the Amazon DynamoDB Developer Guide.
+   * This is a legacy parameter. Use FilterExpression instead. For more
+   * information, see ConditionalOperator in the Amazon DynamoDB Developer
+   * Guide.
-   * Specifies the order for index traversal: If true (default), the traversal is performed in ascending order; if false, the traversal is performed in descending order.
-   * Items with the same partition key value are stored in sorted order by sort key. If the sort key data type is Number, the results are stored in numeric order. For type String, the results are stored in order of UTF-8 bytes. For type Binary, DynamoDB treats each byte of the binary data as unsigned.
-   * If ScanIndexForward is true, DynamoDB returns the results in the order in which they are stored (by sort key value). This is the default behavior. If ScanIndexForward is false, DynamoDB reads the results in reverse order by sort key value, and then returns the results to the client.
+   * Specifies the order for index traversal: If true (default), the traversal
+   * is performed in ascending order; if false, the traversal is performed in
+   * descending order.
+   * Items with the same partition key value are stored in sorted order by sort key. If the
+   * sort key data type is Number, the results are stored in numeric order. For type String,
+   * the results are stored in order of UTF-8 bytes. For type Binary, DynamoDB treats each
+   * byte of the binary data as unsigned.
+   * If ScanIndexForward is true, DynamoDB returns the results in
+   * the order in which they are stored (by sort key value). This is the default behavior. If
+   * ScanIndexForward is false, DynamoDB reads the results in
+   * reverse order by sort key value, and then returns the results to the client.
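Not part of the diff: a tiny sketch of ScanIndexForward, returning the newest items first from an assumed table whose sort key is a timestamp.

  import { DynamoDBClient, QueryCommand } from "@aws-sdk/client-dynamodb";

  const ddb = new DynamoDBClient({});

  async function latestEvents() {
    return ddb.send(
      new QueryCommand({
        TableName: "Events",                     // assumed table name
        KeyConditionExpression: "DeviceId = :d", // assumed partition key
        ExpressionAttributeValues: { ":d": { S: "sensor-1" } },
        ScanIndexForward: false,                 // descending order by sort key
        Limit: 10,
      })
    );
  }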
-   * The primary key of the first item that this operation will evaluate. Use the value that was returned for LastEvaluatedKey in the previous operation.
-   * The data type for ExclusiveStartKey must be String, Number, or Binary. No
+   * The primary key of the first item that this operation will evaluate. Use the value
+   * that was returned for LastEvaluatedKey in the previous operation.
+   * The data type for ExclusiveStartKey must be String, Number, or Binary. No
    * set data types are allowed.
-   * Determines the level of detail about provisioned throughput consumption that is returned in the response:
+   * Determines the level of detail about provisioned throughput consumption that is
+   * returned in the response:
-   * INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed.
-   * Note that some operations, such as GetItem and BatchGetItem, do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).
+   * INDEXES - The response includes the aggregate
+   * ConsumedCapacity for the operation, together with
+   * ConsumedCapacity for each table and secondary index that was
+   * accessed.
+   * Note that some operations, such as GetItem and
+   * BatchGetItem, do not access any indexes at all. In these cases,
+   * specifying INDEXES will only return ConsumedCapacity
+   * information for table(s).
-   * TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.
+   * TOTAL - The response includes only the aggregate
+   * ConsumedCapacity for the operation.
-   * NONE - No ConsumedCapacity details are included in the response.
+   * NONE - No ConsumedCapacity details are included in the
+   * response.
-   * A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.
-   * If no attribute names are specified, then all attributes will be returned. If any of the requested attributes are not found, they will not appear in the result.
-   * For more information, see
-   * Accessing Item Attributes in the Amazon DynamoDB Developer Guide.
+   * A string that identifies one or more attributes to retrieve from the table. These
+   * attributes can include scalars, sets, or elements of a JSON document. The attributes in
+   * the expression must be separated by commas.
+   * If no attribute names are specified, then all attributes will be returned. If any of
+   * the requested attributes are not found, they will not appear in the result.
+   * For more information, see Accessing Item Attributes in the Amazon DynamoDB Developer
+   * Guide.
    */
   ProjectionExpression?: string;

   /**
-   * A string that contains conditions that DynamoDB applies after the Query operation, but
-   * before the data is returned to you. Items that do not satisfy the FilterExpression
-   * criteria are not returned.
-   * A FilterExpression does not allow key attributes. You cannot define a filter expression based on a partition key or a sort key.
-   * A FilterExpression is applied after the items have already been read; the process of
-   * filtering does not consume any additional read capacity units.
-   * For more information, see Filter
-   * Expressions in the Amazon DynamoDB Developer Guide.
+   * A string that contains conditions that DynamoDB applies after the Query
+   * operation, but before the data is returned to you. Items that do not satisfy the
+   * FilterExpression criteria are not returned.
+   * A FilterExpression does not allow key attributes. You cannot define a
+   * filter expression based on a partition key or a sort key.
+   * A FilterExpression is applied after the items have already been read;
+   * the process of filtering does not consume any additional read capacity units.
+   * For more information, see Filter
+   * Expressions in the Amazon DynamoDB Developer
+   * Guide.
    */
   FilterExpression?: string;

@@ -9868,22 +10411,24 @@ export interface QueryInput {
    * The condition that specifies the key values for items to be retrieved by the
    * Query action.
-   * The condition must perform an equality test on a single partition key value.
-   * The condition can optionally perform one of several comparison tests on a single
-   * sort key value. This allows Query to retrieve one item with a given
-   * partition key value and sort key value, or several items that have the same partition
-   * key value but different sort key values.
+   * The condition must perform an equality test on a single partition key value.
+   * The condition can optionally perform one of several comparison tests on a single sort
+   * key value. This allows Query to retrieve one item with a given partition
+   * key value and sort key value, or several items that have the same partition key value
+   * but different sort key values.
-   * The partition key equality test is required, and must be specified in the following format:
+   * The partition key equality test is required, and must be specified in the following
+   * format:
    * partitionKeyName = :partitionkeyval
-   * If you also want to provide a condition for the sort key, it must be combined using AND with the condition
-   * for the sort key. Following is an example, using the = comparison operator for the sort key:
+   * If you also want to provide a condition for the sort key, it must be combined using
+   * AND with the condition for the sort key. Following is an example, using
+   * the = comparison operator for the sort key:
    * partitionKeyName
@@ -9894,155 +10439,175 @@ export interface QueryInput {
    * =
    * :sortkeyval
    *
-   * Valid comparisons for the sort key condition are as follows:
+   * Valid comparisons for the sort key condition are as follows:
+   *
-   * sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval.
+   * sortKeyName = :sortkeyval - true if the sort key value is equal to
+   * :sortkeyval.
-   * sortKeyName < :sortkeyval - true if the sort key value is less than :sortkeyval.
+   * sortKeyName < :sortkeyval - true if the sort key value is less than
+   * :sortkeyval.
-   * sortKeyName <= :sortkeyval - true if the sort key value is less than or equal to
-   * :sortkeyval.
+   * sortKeyName <= :sortkeyval - true if the sort key value is less than or equal to
+   * :sortkeyval.
-   * sortKeyName > :sortkeyval - true if the sort key value is greater than :sortkeyval.
+   * sortKeyName > :sortkeyval - true if the sort key value is greater than
+   * :sortkeyval.
-   * sortKeyName >= :sortkeyval - true if the sort key value is greater than
-   * or equal to :sortkeyval.
+   * sortKeyName >= :sortkeyval - true if the sort key value is greater than or equal
+   * to :sortkeyval.
-   * sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort key value is greater than or equal to
-   * :sortkeyval1, and less than or equal to :sortkeyval2.
+   * sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort key value is greater than or equal
+   * to :sortkeyval1, and less than or equal to
+   * :sortkeyval2.
-   * begins_with ( sortKeyName, :sortkeyval ) -
-   * true if the sort key value begins with a particular operand. (You cannot use this function with a sort key that is of type Number.) Note that the function name
-   * begins_with is case-sensitive.
+   * begins_with ( sortKeyName, :sortkeyval ) - true if the sort key value begins with a particular operand.
+   * (You cannot use this function with a sort key that is of type Number.) Note that
+   * the function name begins_with is case-sensitive.
parameter to replace tokens such as
- * :partitionval
and :sortval
with actual values at runtime.
Use the ExpressionAttributeValues
parameter to replace tokens such as
+ * :partitionval
and :sortval
with actual values at
+ * runtime.
You can optionally use the ExpressionAttributeNames
parameter to replace the names of
- * the partition key and sort key with placeholder tokens. This option might be necessary if an attribute
- * name conflicts with a DynamoDB reserved word. For example, the following
- * KeyConditionExpression
parameter causes an error because Size is a reserved
- * word:
- * Size = :myval
- *
You can optionally use the ExpressionAttributeNames
parameter to replace
+ * the names of the partition key and sort key with placeholder tokens. This option might
+ * be necessary if an attribute name conflicts with a DynamoDB reserved word. For example,
+ * the following KeyConditionExpression
parameter causes an error because
+ * Size is a reserved word:
+ * Size = :myval
+ *
To work around this, define a placeholder (such a #S
) to represent the attribute
- * name Size. KeyConditionExpression
then is as follows:
To work around this, define a placeholder (such a #S
) to represent the
+ * attribute name Size. KeyConditionExpression
then is as
+ * follows:
- * #S = :myval
- *
+ * #S = :myval
+ *
For a list of reserved words, see Reserved - * Words in the Amazon DynamoDB Developer Guide.
+ *For a list of reserved words, see Reserved Words + * in the Amazon DynamoDB Developer Guide.
* - *For more information on ExpressionAttributeNames
and ExpressionAttributeValues
,
- * see Using Placeholders for Attribute
- * Names and Values in the Amazon DynamoDB Developer Guide.
For more information on ExpressionAttributeNames
and
+ * ExpressionAttributeValues
, see Using
+ * Placeholders for Attribute Names and Values in the Amazon DynamoDB
+ * Developer Guide.
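Not part of the diff: a sketch of the Size workaround described above. The table name and key schema are assumed (Id partition key, Size sort key).

  import { DynamoDBClient, QueryCommand } from "@aws-sdk/client-dynamodb";

  const ddb = new DynamoDBClient({});

  async function queryBySize() {
    return ddb.send(
      new QueryCommand({
        TableName: "Products", // assumed table name
        // #S stands in for the reserved word "Size"; :id and :myval are
        // bound at runtime through ExpressionAttributeValues.
        KeyConditionExpression: "Id = :id AND #S > :myval",
        ExpressionAttributeNames: { "#S": "Size" },
        ExpressionAttributeValues: { ":id": { S: "p1" }, ":myval": { N: "10" } },
      })
    );
  }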
-   * One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:
+   * One or more substitution tokens for attribute names in an expression. The following
+   * are some use cases for using ExpressionAttributeNames:
-   * To access an attribute whose name conflicts with a DynamoDB reserved word.
+   * To access an attribute whose name conflicts with a DynamoDB reserved
+   * word.
-   * To create a placeholder for repeating occurrences of an attribute name in an expression.
+   * To create a placeholder for repeating occurrences of an attribute name in an
+   * expression.
-   * To prevent special characters in an attribute name from being misinterpreted in an expression.
+   * To prevent special characters in an attribute name from being misinterpreted
+   * in an expression.
-   * Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
+   * Use the # character in an expression to dereference
+   * an attribute name. For example, consider the following attribute name:
-   * Percentile
+   * Percentile
-   * The name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide). To work around this, you could specify the following for
-   * ExpressionAttributeNames:
+   * The name of this attribute conflicts with a reserved word, so it cannot be used
+   * directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer
+   * Guide). To work around this, you could specify the following for
+   * ExpressionAttributeNames:
-   * {"#P":"Percentile"}
+   * {"#P":"Percentile"}
-   * You could then use this substitution in an expression, as in this example:
+   * You could then use this substitution in an expression, as in this example:
-   * #P = :val
+   * #P = :val
-   * Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.
+   * Tokens that begin with the : character are
+   * expression attribute values, which are placeholders for the
+   * actual value at runtime.
    * For more information on expression attribute names, see Specifying Item Attributes in the Amazon DynamoDB Developer
    * Guide.
    */
   ExpressionAttributeNames?: { [key: string]: string };

   /**
    * One or more values that can be substituted in an expression.
-   * Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the
-   * ProductStatus attribute was one of the following:
+   * Use the : (colon) character in an expression to
+   * dereference an attribute value. For example, suppose that you wanted to check whether
+   * the value of the ProductStatus attribute was one of the following:
    * Available | Backordered | Discontinued
-   * You would first need to specify ExpressionAttributeValues as follows:
-   * { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} }
-   * You could then use these values in an expression, such as this:
+   * You would first need to specify ExpressionAttributeValues as
+   * follows:
+   * { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
+   * ":disc":{"S":"Discontinued"} }
+   * You could then use these values in an expression, such as this:
    * ProductStatus IN (:avail, :back, :disc)
-   * For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer Guide.
+   * For more information on expression attribute values, see Specifying Conditions in the Amazon DynamoDB Developer
+   * Guide.
    */
   ExpressionAttributeValues?: { [key: string]: AttributeValue };
 }

@@ -10098,81 +10663,93 @@ export namespace QueryInput {
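Not part of the diff: the ProductStatus example above, sketched as a runnable call. IN is not valid in a key condition, so it is shown in a Scan filter; the table name is assumed.

  import { DynamoDBClient, ScanCommand } from "@aws-sdk/client-dynamodb";

  const ddb = new DynamoDBClient({});

  async function findByStatus() {
    return ddb.send(
      new ScanCommand({
        TableName: "Products", // assumed table name
        FilterExpression: "ProductStatus IN (:avail, :back, :disc)",
        ExpressionAttributeValues: {
          ":avail": { S: "Available" },
          ":back": { S: "Backordered" },
          ":disc": { S: "Discontinued" },
        },
      })
    );
  }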
 export interface BatchWriteItemOutput {
   /**
    * A map of tables and requests against those tables that were not processed. The
-   * UnprocessedItems value is in the same form as RequestItems, so you can provide
-   * this value directly to a subsequent BatchGetItem operation. For more information, see
-   * RequestItems in the Request Parameters section.
-   * Each UnprocessedItems entry consists of a table name and, for that table, a list of
-   * operations to perform (DeleteRequest or PutRequest).
-   * DeleteRequest - Perform a DeleteItem operation on the specified item. The
-   * item to be deleted is identified by a Key subelement:
+   * UnprocessedItems value is in the same form as
+   * RequestItems, so you can provide this value directly to a subsequent
+   * BatchGetItem operation. For more information, see
+   * RequestItems in the Request Parameters section.
+   * Each UnprocessedItems entry consists of a table name and, for that table,
+   * a list of operations to perform (DeleteRequest or
+   * PutRequest).
+   * DeleteRequest - Perform a DeleteItem operation on the
+   * specified item. The item to be deleted is identified by a Key
+   * subelement:
-   * Key - A map of primary key attribute values that uniquely identify the item.
-   * Each entry in this map consists of an attribute name and an attribute value.
+   * Key - A map of primary key attribute values that uniquely
+   * identify the item. Each entry in this map consists of an attribute name
+   * and an attribute value.
-   * PutRequest - Perform a PutItem operation on the specified item. The item to
-   * be put is identified by an Item subelement:
+   * PutRequest - Perform a PutItem operation on the
+   * specified item. The item to be put is identified by an Item
+   * subelement:
-   * Item - A map of attributes and their values. Each entry in this map consists
-   * of an attribute name and an attribute value. Attribute values must not be null; string
-   * and binary type attributes must have lengths greater than zero; and set type
-   * attributes must not be empty. Requests that contain empty values will be rejected with
-   * a ValidationException exception.
-   * If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table's attribute definition.
+   * Item - A map of attributes and their values. Each entry in
+   * this map consists of an attribute name and an attribute value. Attribute
+   * values must not be null; string and binary type attributes must have
+   * lengths greater than zero; and set type attributes must not be empty.
+   * Requests that contain empty values will be rejected with a
+   * ValidationException exception.
+   * If you specify any attributes that are part of an index key, then the
+   * data types for those attributes must match those of the schema in the
+   * table's attribute definition.
-   * If there are no unprocessed items remaining, the response contains an empty
-   * UnprocessedItems map.
+   * If there are no unprocessed items remaining, the response contains an empty
+   * UnprocessedItems map.
and, for each table,
- * information about any item collections that were affected by individual DeleteItem
or
- * PutItem
operations.
Each entry consists of the following subelements:
- *A list of tables that were processed by BatchWriteItem
and, for each
+ * table, information about any item collections that were affected by individual
+ * DeleteItem
or PutItem
operations.
Each entry consists of the following subelements:
+ *
- * ItemCollectionKey
- The partition key value of the item collection. This is the same as
- * the partition key value of the item.
+ * ItemCollectionKey
- The partition key value of the item collection.
+ * This is the same as the partition key value of the item.
- * SizeEstimateRangeGB
- An estimate of item collection size, expressed in GB. This is
- * a two-element array containing a lower bound and an upper bound for the estimate. The
- * estimate includes the size of all the items in the table, plus the size of all attributes
- * projected into all of the local secondary indexes on the table. Use this estimate to measure whether a
- * local secondary index is approaching its size limit.
The estimate is subject to change over time; therefore, do not rely on the precision or accuracy of the estimate.
+ *
+ * SizeEstimateRangeGB
- An estimate of item collection size,
+ * expressed in GB. This is a two-element array containing a lower bound and an
+ * upper bound for the estimate. The estimate includes the size of all the items in
+ * the table, plus the size of all attributes projected into all of the local
+ * secondary indexes on the table. Use this estimate to measure whether a local
+ * secondary index is approaching its size limit.
The estimate is subject to change over time; therefore, do not rely on the + * precision or accuracy of the estimate.
*The capacity units consumed by the entire BatchWriteItem
operation.
Each element consists of:
- *The capacity units consumed by the entire BatchWriteItem
+ * operation.
Each element consists of:
+ *
- * TableName
- The table that consumed the provisioned throughput.
+ * TableName
- The table that consumed the provisioned
+ * throughput.
- * CapacityUnits
- The total number of capacity units consumed.
+ * CapacityUnits
- The total number of capacity units consumed.
The primary key of the item to be updated. Each element consists of an attribute name and a value for that attribute.
- *For the primary key, you must provide all of the attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.
+ *The primary key of the item to be updated. Each element consists of an attribute name + * and a value for that attribute.
+ *For the primary key, you must provide all of the attributes. For example, with a + * simple primary key, you only need to provide a value for the partition key. For a + * composite primary key, you must provide values for both the partition key and the sort + * key.
*/ Key: { [key: string]: AttributeValue } | undefined; /** - *This is a legacy parameter. Use UpdateExpression
-   * This is a legacy parameter. Use UpdateExpression instead. For more information, see
-   * AttributeUpdates in the Amazon DynamoDB Developer Guide.
+   * This is a legacy parameter. Use UpdateExpression instead. For more
+   * information, see AttributeUpdates in the Amazon DynamoDB Developer
+   * Guide.

-   * This is a legacy parameter. Use ConditionExpression instead. For more information, see
-   * Expected in the Amazon DynamoDB Developer Guide.
+   * This is a legacy parameter. Use ConditionExpression instead. For more
+   * information, see Expected in the Amazon DynamoDB Developer
+   * Guide.

-   * This is a legacy parameter. Use ConditionExpression instead. For more information, see
-   * ConditionalOperator in the Amazon DynamoDB Developer Guide.
+   * This is a legacy parameter. Use ConditionExpression instead. For more
+   * information, see ConditionalOperator in the Amazon DynamoDB Developer
+   * Guide.
    * Use ReturnValues if you want to get the item attributes as they appear
    * before or after they are updated. For UpdateItem, the valid values
    * are:
-   * NONE - If ReturnValues is not specified, or if its value is
-   * NONE, then nothing is returned. (This setting is the default for
-   * ReturnValues.)
+   * NONE - If ReturnValues is not specified, or if its
+   * value is NONE, then nothing is returned. (This setting is the
+   * default for ReturnValues.)
-   * ALL_OLD - Returns all of the attributes of the item, as they appeared before the UpdateItem operation.
+   * ALL_OLD - Returns all of the attributes of the item, as they
+   * appeared before the UpdateItem operation.
-   * UPDATED_OLD - Returns only the updated attributes, as they appeared before the UpdateItem operation.
+   * UPDATED_OLD - Returns only the updated attributes, as they appeared
+   * before the UpdateItem operation.
-   * ALL_NEW - Returns all of the attributes of the item, as they appear after the UpdateItem operation.
+   * ALL_NEW - Returns all of the attributes of the item, as they appear
+   * after the UpdateItem operation.
-   * UPDATED_NEW - Returns only the updated attributes, as they appear after the UpdateItem operation.
+   * UPDATED_NEW - Returns only the updated attributes, as they appear
+   * after the UpdateItem operation.
-   * There is no additional cost associated with requesting a return value aside from the
-   * small network and processing overhead of receiving a larger response. No read capacity
-   * units are consumed.
-   * The values returned are strongly consistent.
+   * There is no additional cost associated with requesting a return value aside from the
+   * small network and processing overhead of receiving a larger response. No read capacity
+   * units are consumed.
+   * The values returned are strongly consistent.
    */
   ReturnValues?: ReturnValue | string;

   /**
-   * Determines the level of detail about provisioned throughput consumption that is returned in the response:
+   * Determines the level of detail about provisioned throughput consumption that is
+   * returned in the response:
-   * INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed.
-   * Note that some operations, such as GetItem and BatchGetItem, do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).
+   * INDEXES - The response includes the aggregate
+   * ConsumedCapacity for the operation, together with
+   * ConsumedCapacity for each table and secondary index that was
+   * accessed.
+   * Note that some operations, such as GetItem and
+   * BatchGetItem, do not access any indexes at all. In these cases,
+   * specifying INDEXES will only return ConsumedCapacity
+   * information for table(s).
-   * TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.
+   * TOTAL - The response includes only the aggregate
+   * ConsumedCapacity for the operation.
-   * NONE - No ConsumedCapacity details are included in the response.
+   * NONE - No ConsumedCapacity details are included in the
+   * response.
-   * Determines whether item collection metrics are returned. If set to SIZE, the response includes statistics about item collections, if any, that were modified during
-   * the operation are returned in the response. If set to NONE (the default), no statistics are returned.
+   * Determines whether item collection metrics are returned. If set to SIZE,
+   * the response includes statistics about item collections, if any, that were modified
+   * during the operation. If set to NONE (the
+   * default), no statistics are returned.
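For illustration only (not part of the diff): a minimal UpdateItem sketch pulling together the parameters documented in this interface. The table, key, and attribute names are assumed for the example.

  import { DynamoDBClient, UpdateItemCommand } from "@aws-sdk/client-dynamodb";

  const ddb = new DynamoDBClient({});

  async function addToItemCount() {
    return ddb.send(
      new UpdateItemCommand({
        TableName: "Products",            // assumed table name
        Key: { Id: { S: "p1" } },         // assumed primary key
        // SET and ADD actions in one expression; #S works around the
        // reserved word "Size", exactly as the comments above describe.
        UpdateExpression: "SET #S = :size ADD itemcount :inc",
        ConditionExpression: "attribute_exists(Id)",
        ExpressionAttributeNames: { "#S": "Size" },
        ExpressionAttributeValues: { ":size": { N: "10" }, ":inc": { N: "3" } },
        ReturnValues: "UPDATED_NEW",      // only the updated attributes, post-update
      })
    );
  }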
    * An expression that defines one or more attributes to be updated, the action to be
    * performed on them, and new values for them.
-   * The following action values are available for UpdateExpression.
+   * The following action values are available for UpdateExpression.
+   *
* SET
- Adds one or more attributes and values to an item. If any of
* these attributes already exist, they are replaced by the new values. You can
* also use SET
to add or subtract from an attribute that is of type
* Number. For example: SET myNum = myNum + :val
*
- * SET
supports the following functions:
+ * SET
supports the following functions:
- * if_not_exists (path, operand)
- if the item does not contain an attribute at the specified path, then if_not_exists
evaluates to operand; otherwise, it evaluates to path. You can use this function to avoid overwriting an attribute that may already be present in the item.
+ * if_not_exists (path, operand)
- if the item does not
+ * contain an attribute at the specified path, then
+ * if_not_exists
evaluates to operand; otherwise, it
+ * evaluates to path. You can use this function to avoid overwriting an
+ * attribute that may already be present in the item.
- * list_append (operand, operand)
- evaluates to a list with a new element added to it. You can append the new element to the start or the end of the list by reversing the order of the operands.
+ * list_append (operand, operand)
- evaluates to a list with a
+ * new element added to it. You can append the new element to the start or
+ * the end of the list by reversing the order of the operands.
These function names are case-sensitive.
+ *These function names are case-sensitive.
* *
- * REMOVE
- Removes one or more attributes from an item.
+ * REMOVE
- Removes one or more attributes from an item.
- * ADD
- Adds the specified value to the item, if the attribute does not already exist. If the attribute does exist, then the behavior of
- * ADD
depends on the data type of the attribute:
+ * ADD
- Adds the specified value to the item, if the attribute does
+ * not already exist. If the attribute does exist, then the behavior of
+ * ADD
depends on the data type of the attribute:
If the existing attribute is a number, and if Value
is also a number, then
- * Value
is mathematically added to the existing attribute. If Value
is a
- * negative number, then it is subtracted from the existing attribute.
If you use ADD
to increment or decrement a number value for an item
- * that doesn't exist before the update, DynamoDB uses 0
as the initial
- * value.
Similarly, if you use ADD
for an existing item to increment
- * or decrement an attribute value that doesn't exist before the
- * update, DynamoDB uses 0
as the initial value. For
+ *
If the existing attribute is a number, and if Value
is
+ * also a number, then Value
is mathematically added to the
+ * existing attribute. If Value
is a negative number, then it
+ * is subtracted from the existing attribute.
If you use ADD
to increment or decrement a number
+ * value for an item that doesn't exist before the update, DynamoDB
+ * uses 0
as the initial value.
Similarly, if you use ADD
for an existing item to
+ * increment or decrement an attribute value that doesn't exist before
+ * the update, DynamoDB uses 0
as the initial value. For
* example, suppose that the item you want to update doesn't have an
* attribute named itemcount
, but you decide to
* ADD
the number 3
to this attribute
@@ -10354,134 +10961,151 @@ export interface UpdateItemInput {
* 3
to it. The result will be a new
* itemcount
attribute in the item, with a value of
* 3
.
If the existing data type is a set and if Value
is also a set, then
- * Value
is added to the existing set. For example, if the attribute value is the set
- * [1,2]
, and the ADD
action specified [3]
, then
- * the final attribute value is [1,2,3]
. An error occurs if an ADD
- * action is specified for a set attribute and the attribute type specified does not
- * match the existing set type.
Both sets must have the same primitive data type. For example, if the existing data
- * type is a set of strings, the Value
must also be a set of strings.
If the existing data type is a set and if Value
is also a
+ * set, then Value
is added to the existing set. For example,
+ * if the attribute value is the set [1,2]
, and the
+ * ADD
action specified [3]
, then the final
+ * attribute value is [1,2,3]
. An error occurs if an
+ * ADD
action is specified for a set attribute and the
+ * attribute type specified does not match the existing set type.
Both sets must have the same primitive data type. For example, if the
+ * existing data type is a set of strings, the Value
must also
+ * be a set of strings.
The ADD
action only supports Number and set data types. In addition,
- * ADD
can only be used on top-level attributes, not nested attributes.
- * DELETE
- Deletes an element from a set.
If a set of values is specified, then those values are subtracted from the old
- * set. For example, if the attribute value was the set [a,b,c]
and the
- * DELETE
action specifies [a,c]
, then the final attribute value
- * is [b]
. Specifying an empty set is an error.
The DELETE
action only supports set data types. In addition,
- * DELETE
can only be used on top-level attributes, not nested attributes.
The ADD
action only supports Number and set data types. In
+ * addition, ADD
can only be used on top-level attributes, not
+ * nested attributes.
+ * DELETE
- Deletes an element from a set.
If a set of values is specified, then those values are subtracted from the old
+ * set. For example, if the attribute value was the set [a,b,c]
and
+ * the DELETE
action specifies [a,c]
, then the final
+ * attribute value is [b]
. Specifying an empty set is an error.
The DELETE
action only supports set data types. In addition,
+ * DELETE
can only be used on top-level attributes, not nested
+ * attributes.
-   * You can have many actions in a single expression, such as the following: SET a=:value1,
-   * b=:value2 DELETE :value3, :value4, :value5
-   * For more information on update expressions, see Modifying Items and Attributes in the Amazon DynamoDB Developer Guide.
+   * You can have many actions in a single expression, such as the following: SET
+   * a=:value1, b=:value2 DELETE :value3, :value4, :value5
+   * For more information on update expressions, see Modifying
+   * Items and Attributes in the Amazon DynamoDB Developer
+   * Guide.
    */
   UpdateExpression?: string;

   /**
-   * A condition that must be satisfied in order for a conditional update to succeed.
-   * An expression can contain any of the following:
+   * A condition that must be satisfied in order for a conditional update to
+   * succeed.
+   * An expression can contain any of the following:
-   * Functions: attribute_exists | attribute_not_exists | attribute_type | contains | begins_with | size
-   * These function names are case-sensitive.
+   * Functions: attribute_exists | attribute_not_exists | attribute_type |
+   * contains | begins_with | size
+   * These function names are case-sensitive.
+ *
= | <> |
* < | > | <= | >= |
* BETWEEN | IN
- *
Logical operators: AND | OR | NOT
- *
Logical operators: AND | OR | NOT
+ *
For more information about condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer + *
For more information about condition expressions, see Specifying Conditions in the Amazon DynamoDB Developer * Guide.
    */
   ConditionExpression?: string;

   /**
-   * One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames:
+   * One or more substitution tokens for attribute names in an expression. The following
+   * are some use cases for using ExpressionAttributeNames:
-   * To access an attribute whose name conflicts with a DynamoDB reserved word.
+   * To access an attribute whose name conflicts with a DynamoDB reserved
+   * word.
-   * To create a placeholder for repeating occurrences of an attribute name in an expression.
+   * To create a placeholder for repeating occurrences of an attribute name in an
+   * expression.
-   * To prevent special characters in an attribute name from being misinterpreted in an expression.
+   * To prevent special characters in an attribute name from being misinterpreted
+   * in an expression.
-   * Use the # character in an expression to dereference an attribute name. For example, consider the following attribute name:
+   * Use the # character in an expression to dereference
+   * an attribute name. For example, consider the following attribute name:
-   * Percentile
+   * Percentile
-   * The name of this attribute conflicts with a reserved word, so it cannot be used directly
-   * in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer
+   * The name of this attribute conflicts with a reserved word, so it cannot be used
+   * directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer
    * Guide.) To work around this, you could specify the following for
    * ExpressionAttributeNames:
-   * {"#P":"Percentile"}
+   * {"#P":"Percentile"}
-   * You could then use this substitution in an expression, as in this example:
+   * You could then use this substitution in an expression, as in this example:
-   * #P = :val
+   * #P = :val
-   * Tokens that begin with the : character are expression attribute values, which are placeholders for the actual value at runtime.
+   * Tokens that begin with the : character are
+   * expression attribute values, which are placeholders for the
+   * actual value at runtime.
    * For more information about expression attribute names, see Specifying Item Attributes in the Amazon DynamoDB Developer
    * Guide.
    */
   ExpressionAttributeNames?: { [key: string]: string };

   /**
- *Use the : (colon) character in an expression to + *
Use the : (colon) character in an expression to
* dereference an attribute value. For example, suppose that you wanted to check whether
* the value of the ProductStatus
attribute was one of the following:
+ *
* Available | Backordered | Discontinued
- *
You would first need to specify ExpressionAttributeValues
as follows:
- * { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} }
- *
You could then use these values in an expression, such as this:
- *+ *
+ *You would first need to specify ExpressionAttributeValues
as
+ * follows:
+ * { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
+ * ":disc":{"S":"Discontinued"} }
+ *
You could then use these values in an expression, such as this:
+ *
* ProductStatus IN (:avail, :back, :disc)
- *
For more information on expression attribute values, see Condition Expressions in the Amazon DynamoDB Developer * Guide.
*/ @@ -10534,7 +11158,8 @@ export namespace UpdateItemInput { } /** - *A list of requests that can perform update, put, delete, or check operations on multiple items in one or more tables atomically.
+ * A list of requests that can perform update, put, delete, or check operations on
+ * multiple items in one or more tables atomically.
 */
 export interface TransactWriteItem {
   /**

@@ -10576,55 +11201,64 @@ export interface TransactWriteItemsInput {
    * An ordered array of up to 25 TransactWriteItem
objects, each of which
* contains a ConditionCheck
, Put
, Update
, or
* Delete
object. These can operate on items in different tables, but the
- * tables must reside in the same AWS account and Region, and no two of them can operate on
- * the same item.
Determines the level of detail about provisioned throughput consumption that is returned in the response:
-   * Determines the level of detail about provisioned throughput consumption that is returned in the response:
+   * Determines the level of detail about provisioned throughput consumption that is
+   * returned in the response:
-   * INDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed.
-   * Note that some operations, such as GetItem and BatchGetItem, do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).
+   * INDEXES - The response includes the aggregate
+   * ConsumedCapacity for the operation, together with
+   * ConsumedCapacity for each table and secondary index that was
+   * accessed.
+   * Note that some operations, such as GetItem and
+   * BatchGetItem, do not access any indexes at all. In these cases,
+   * specifying INDEXES will only return ConsumedCapacity
+   * information for table(s).
-   * TOTAL - The response includes only the aggregate ConsumedCapacity for the operation.
+   * TOTAL - The response includes only the aggregate
+   * ConsumedCapacity for the operation.
-   * NONE - No ConsumedCapacity details are included in the response.
+   * NONE - No ConsumedCapacity details are included in the
+   * response.
- * SIZE
, the response includes statistics about item collections (if any), that
- * were modified during the operation and are returned in the response.
- * If set to NONE
(the default), no statistics are returned.
- *
Determines whether item collection metrics are returned. If set to SIZE
,
+ * the response includes statistics about item collections (if any), that were modified
+ * during the operation and are returned in the response. If set to NONE
(the
+ * default), no statistics are returned.
Providing a ClientRequestToken
makes the call to TransactWriteItems
- * idempotent, meaning that multiple identical calls have the same effect as one single call.
Although multiple identical calls using the same client request token produce the same + *
Providing a ClientRequestToken
makes the call to
+ * TransactWriteItems
idempotent, meaning that multiple identical calls
+ * have the same effect as one single call.
Although multiple identical calls using the same client request token produce the same
* result on the server (no side effects), the responses to the calls might not be the
* same. If the ReturnConsumedCapacity>
parameter is set, then the initial
* TransactWriteItems
call returns the amount of write capacity units
* consumed in making the changes. Subsequent TransactWriteItems
calls with
* the same client token return the number of read capacity units consumed in reading the
* item.
A client request token is valid for 10 minutes after the first request that uses it is + *
A client request token is valid for 10 minutes after the first request that uses it is * completed. After 10 minutes, any request with the same client token is treated as a new * request. Do not resubmit the same request with the same client token for more than 10 * minutes, or the result might not be idempotent.
- *If you submit a request with the same client token but a change in other parameters + *
If you submit a request with the same client token but a change in other parameters
* within the 10-minute idempotency window, DynamoDB returns an
* IdempotentParameterMismatch
exception.
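For illustration only (not part of the diff): a short sketch of the idempotent TransactWriteItems call described above. Table, key, and token values are assumed.

  import { DynamoDBClient, TransactWriteItemsCommand } from "@aws-sdk/client-dynamodb";
  import { randomUUID } from "crypto";

  const ddb = new DynamoDBClient({});

  async function transferAtomically() {
    // Reusing the same ClientRequestToken within 10 minutes makes retries of
    // this exact request idempotent; changing any other parameter under the
    // same token raises IdempotentParameterMismatch.
    const token = randomUUID();
    return ddb.send(
      new TransactWriteItemsCommand({
        ClientRequestToken: token,
        TransactItems: [
          {
            Update: {
              TableName: "Accounts",            // assumed table name
              Key: { Id: { S: "a" } },          // assumed key
              UpdateExpression: "ADD Balance :d",
              ExpressionAttributeValues: { ":d": { N: "-10" } },
            },
          },
          {
            Update: {
              TableName: "Accounts",
              Key: { Id: { S: "b" } },          // a different item, as required
              UpdateExpression: "ADD Balance :c",
              ExpressionAttributeValues: { ":c": { N: "10" } },
            },
          },
        ],
      })
    );
  }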
    * The ID of a VPC peering connection.
    */
   VpcPeeringConnectionId?: string;
+
+  CoreNetworkArn?: string;
 }

 export namespace CreateRouteRequest {

@@ -6299,6 +6301,8 @@ export interface Route {
    * The ID of a VPC peering connection.
    */
   VpcPeeringConnectionId?: string;
+
+  CoreNetworkArn?: string;
 }

 export namespace Route {

diff --git a/clients/client-ec2/src/models/models_2.ts b/clients/client-ec2/src/models/models_2.ts
index 667070f34bef8..d0327f311c71c 100644
--- a/clients/client-ec2/src/models/models_2.ts
+++ b/clients/client-ec2/src/models/models_2.ts
@@ -845,6 +845,13 @@ export namespace CreateVpnConnectionRequest {
   });
 }

+export enum GatewayAssociationState {
+  associated = "associated",
+  associating = "associating",
+  disassociating = "disassociating",
+  not_associated = "not-associated",
+}
+
 /**
*/ VpcPeeringConnectionId?: string; + + CoreNetworkArn?: string; } export namespace Route { diff --git a/clients/client-ec2/src/models/models_2.ts b/clients/client-ec2/src/models/models_2.ts index 667070f34bef8..d0327f311c71c 100644 --- a/clients/client-ec2/src/models/models_2.ts +++ b/clients/client-ec2/src/models/models_2.ts @@ -845,6 +845,13 @@ export namespace CreateVpnConnectionRequest { }); } +export enum GatewayAssociationState { + associated = "associated", + associating = "associating", + disassociating = "disassociating", + not_associated = "not-associated", +} + /** *The internet key exchange (IKE) version permitted for the VPN tunnel.
  */

@@ -1293,7 +1300,7 @@ export interface VpnConnection {
   /**
    * The current state of the gateway association.
    */
-  GatewayAssociationState?: string;
+  GatewayAssociationState?: GatewayAssociationState | string;

   /**
    * The VPN connection options.
@@ -9467,35 +9474,3 @@ export namespace DescribeHostsRequest { ...obj, }); } - -/** - *Information about the number of instances that can be launched onto the Dedicated - * Host.
- */ -export interface InstanceCapacity { - /** - *The number of instances that can be launched onto the Dedicated Host based on the - * host's available capacity.
- */ - AvailableCapacity?: number; - - /** - *The instance type supported by the Dedicated Host.
- */ - InstanceType?: string; - - /** - *The total number of instances that can be launched onto the Dedicated Host if there - * are no instances running on it.
- */ - TotalCapacity?: number; -} - -export namespace InstanceCapacity { - /** - * @internal - */ - export const filterSensitiveLog = (obj: InstanceCapacity): any => ({ - ...obj, - }); -} diff --git a/clients/client-ec2/src/models/models_3.ts b/clients/client-ec2/src/models/models_3.ts index 14939af9f959d..866eecea2d039 100644 --- a/clients/client-ec2/src/models/models_3.ts +++ b/clients/client-ec2/src/models/models_3.ts @@ -23,7 +23,6 @@ import { ReservedInstancesListing, ResourceType, SecurityGroupRule, - Subnet, Tag, TagSpecification, TargetCapacityUnitType, @@ -68,12 +67,43 @@ import { EventInformation, Filter, IdFormat, - InstanceCapacity, InstanceTagNotificationAttribute, PermissionGroup, ProductCode, } from "./models_2"; +/** + *Information about the number of instances that can be launched onto the Dedicated + * Host.
+ */ +export interface InstanceCapacity { + /** + *The number of instances that can be launched onto the Dedicated Host based on the + * host's available capacity.
+ */ + AvailableCapacity?: number; + + /** + *The instance type supported by the Dedicated Host.
+ */ + InstanceType?: string; + + /** + *The total number of instances that can be launched onto the Dedicated Host if there + * are no instances running on it.
+ */ + TotalCapacity?: number; +} + +export namespace InstanceCapacity { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InstanceCapacity): any => ({ + ...obj, + }); +} + /** *The capacity information for instances that can be launched onto the Dedicated Host.
*/ @@ -12955,24 +12985,3 @@ export namespace DescribeSubnetsRequest { ...obj, }); } - -export interface DescribeSubnetsResult { - /** - *Information about one or more subnets.
- */ - Subnets?: Subnet[]; - - /** - *The token to use to retrieve the next page of results. This value is null
when there are no more results to return.
Information about one or more subnets.
+ */ + Subnets?: Subnet[]; + + /** + *The token to use to retrieve the next page of results. This value is null
when there are no more results to return.
Checks whether you have the required permissions for the action, without actually making the request, @@ -10012,33 +10034,3 @@ export namespace InstanceCreditSpecificationRequest { ...obj, }); } - -export interface ModifyInstanceCreditSpecificationRequest { - /** - *
Checks whether you have the required permissions for the action, without actually making the request,
- * and provides an error response. If you have the required permissions, the error response is DryRunOperation
.
- * Otherwise, it is UnauthorizedOperation
.
A unique, case-sensitive token that you provide to ensure idempotency of your - * modification request. For more information, see Ensuring - * Idempotency.
- */ - ClientToken?: string; - - /** - *Information about the credit option for CPU usage.
- */ - InstanceCreditSpecifications: InstanceCreditSpecificationRequest[] | undefined; -} - -export namespace ModifyInstanceCreditSpecificationRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ModifyInstanceCreditSpecificationRequest): any => ({ - ...obj, - }); -} diff --git a/clients/client-ec2/src/models/models_5.ts b/clients/client-ec2/src/models/models_5.ts index a9d0f864edd73..2459cb3294813 100644 --- a/clients/client-ec2/src/models/models_5.ts +++ b/clients/client-ec2/src/models/models_5.ts @@ -94,7 +94,43 @@ import { SpotInstanceRequest, SpotPlacement, } from "./models_3"; -import { CapacityReservationSpecification, OperationType, Purchase, VolumeModification } from "./models_4"; +import { + CapacityReservationSpecification, + InstanceCreditSpecificationRequest, + OperationType, + Purchase, + VolumeModification, +} from "./models_4"; + +export interface ModifyInstanceCreditSpecificationRequest { + /** + *Checks whether you have the required permissions for the action, without actually making the request,
+ * and provides an error response. If you have the required permissions, the error response is DryRunOperation
.
+ * Otherwise, it is UnauthorizedOperation
.
A unique, case-sensitive token that you provide to ensure idempotency of your + * modification request. For more information, see Ensuring + * Idempotency.
+ */ + ClientToken?: string; + + /** + *Information about the credit option for CPU usage.
+ */ + InstanceCreditSpecifications: InstanceCreditSpecificationRequest[] | undefined; +} + +export namespace ModifyInstanceCreditSpecificationRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ModifyInstanceCreditSpecificationRequest): any => ({ + ...obj, + }); +} /** *Describes the burstable performance instance whose credit option for CPU usage was
@@ -2759,7 +2795,7 @@ export interface ProvisionByoipCidrRequest {
PoolTagSpecifications?: TagSpecification[];
/**
- *
Reserved.
*/ MultiRegion?: boolean; } @@ -3928,6 +3964,8 @@ export interface ReplaceRouteRequest { *The ID of a VPC peering connection.
*/ VpcPeeringConnectionId?: string; + + CoreNetworkArn?: string; } export namespace ReplaceRouteRequest { diff --git a/clients/client-ec2/src/protocols/Aws_ec2.ts b/clients/client-ec2/src/protocols/Aws_ec2.ts index 695a0f4c93598..3015bbecd1dad 100644 --- a/clients/client-ec2/src/protocols/Aws_ec2.ts +++ b/clients/client-ec2/src/protocols/Aws_ec2.ts @@ -2316,7 +2316,6 @@ import { ImportInstanceTaskDetails, ImportInstanceVolumeDetailItem, ImportVolumeTaskDetails, - InstanceCapacity, InstanceEventWindowStateChange, InstanceTagNotificationAttribute, LoadPermission, @@ -2493,7 +2492,6 @@ import { DescribeStoreImageTasksRequest, DescribeStoreImageTasksResult, DescribeSubnetsRequest, - DescribeSubnetsResult, DiskInfo, EbsInfo, EbsInstanceBlockDevice, @@ -2524,6 +2522,7 @@ import { Instance, InstanceAttribute, InstanceBlockDeviceMapping, + InstanceCapacity, InstanceCreditSpecification, InstanceIpv4Prefix, InstanceIpv6Prefix, @@ -2623,6 +2622,7 @@ import { ClientCertificateRevocationListStatus, ClientData, CoipAddressUsage, + DescribeSubnetsResult, DescribeTagsRequest, DescribeTagsResult, DescribeTrafficMirrorFiltersRequest, @@ -2870,7 +2870,6 @@ import { ModifyInstanceAttributeRequest, ModifyInstanceCapacityReservationAttributesRequest, ModifyInstanceCapacityReservationAttributesResult, - ModifyInstanceCreditSpecificationRequest, PrefixListAssociation, PrefixListEntry, PrivateDnsDetails, @@ -2917,6 +2916,7 @@ import { InstanceStateChange, LaunchTemplateSpecification, LicenseConfigurationRequest, + ModifyInstanceCreditSpecificationRequest, ModifyInstanceCreditSpecificationResult, ModifyInstanceEventStartTimeRequest, ModifyInstanceEventStartTimeResult, @@ -35167,6 +35167,9 @@ const serializeAws_ec2CreateRouteRequest = (input: CreateRouteRequest, context: if (input.VpcPeeringConnectionId !== undefined && input.VpcPeeringConnectionId !== null) { entries["VpcPeeringConnectionId"] = input.VpcPeeringConnectionId; } + if (input.CoreNetworkArn !== undefined && input.CoreNetworkArn !== null) { + entries["CoreNetworkArn"] = input.CoreNetworkArn; + } return entries; }; @@ -48091,6 +48094,9 @@ const serializeAws_ec2ReplaceRouteRequest = (input: ReplaceRouteRequest, context if (input.VpcPeeringConnectionId !== undefined && input.VpcPeeringConnectionId !== null) { entries["VpcPeeringConnectionId"] = input.VpcPeeringConnectionId; } + if (input.CoreNetworkArn !== undefined && input.CoreNetworkArn !== null) { + entries["CoreNetworkArn"] = input.CoreNetworkArn; + } return entries; }; @@ -71395,6 +71401,7 @@ const deserializeAws_ec2Route = (output: any, context: __SerdeContext): Route => Origin: undefined, State: undefined, VpcPeeringConnectionId: undefined, + CoreNetworkArn: undefined, }; if (output["destinationCidrBlock"] !== undefined) { contents.DestinationCidrBlock = __expectString(output["destinationCidrBlock"]); @@ -71441,6 +71448,9 @@ const deserializeAws_ec2Route = (output: any, context: __SerdeContext): Route => if (output["vpcPeeringConnectionId"] !== undefined) { contents.VpcPeeringConnectionId = __expectString(output["vpcPeeringConnectionId"]); } + if (output["coreNetworkArn"] !== undefined) { + contents.CoreNetworkArn = __expectString(output["coreNetworkArn"]); + } return contents; }; diff --git a/clients/client-ecr-public/src/endpoints.ts b/clients/client-ecr-public/src/endpoints.ts index 2c1925def71cd..db2f79aff97e5 100644 --- a/clients/client-ecr-public/src/endpoints.ts +++ b/clients/client-ecr-public/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: 
"api.ecr-public.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "api.ecr-public-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "api.ecr-public.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "api.ecr-public-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-ecr/src/endpoints.ts b/clients/client-ecr/src/endpoints.ts index 8e438ec364d99..4a8dba7d6bdda 100644 --- a/clients/client-ecr/src/endpoints.ts +++ b/clients/client-ecr/src/endpoints.ts @@ -359,6 +359,10 @@ const partitionHash: PartitionHash = { hostname: "api.ecr.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "api.ecr-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -369,6 +373,10 @@ const partitionHash: PartitionHash = { hostname: "api.ecr.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "api.ecr-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-ecs/README.md b/clients/client-ecs/README.md index 5d13d13f04239..830a4e16dc6bf 100644 --- a/clients/client-ecs/README.md +++ b/clients/client-ecs/README.md @@ -9,18 +9,18 @@ AWS SDK for JavaScript ECS Client for Node.js, Browser and React Native.Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service that makes +
Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service. It makes
 it easy to run, stop, and manage Docker containers on a cluster. You can host your
-cluster on a serverless infrastructure that is managed by Amazon ECS by launching your
+cluster on a serverless infrastructure that's managed by Amazon ECS by launching your
 services or tasks on Fargate. For more control, you can host your tasks on a cluster of
 Amazon Elastic Compute Cloud (Amazon EC2) instances that you manage.

-Amazon ECS makes it easy to launch and stop container-based applications with simple API
-calls, allows you to get the state of your cluster from a centralized service, and gives
-you access to many familiar Amazon EC2 features.
+Amazon ECS makes it easy to launch and stop container-based applications with simple API
+calls. This makes it easy to get the state of your cluster from a centralized service,
+and gives you access to many familiar Amazon EC2 features.

-You can use Amazon ECS to schedule the placement of containers across your cluster based on
-your resource needs, isolation policies, and availability requirements. Amazon ECS eliminates
-the need for you to operate your own cluster management and configuration management
-systems or worry about scaling your management infrastructure.
+You can use Amazon ECS to schedule the placement of containers across your cluster based on
+your resource needs, isolation policies, and availability requirements. With Amazon ECS, you
+don't need to operate your own cluster management and configuration management systems.
+You also don't need to worry about scaling your management infrastructure.

 ## Installing

diff --git a/clients/client-ecs/src/ECS.ts b/clients/client-ecs/src/ECS.ts
index 4f7378799ffc6..35c0e0e9e7962 100644
--- a/clients/client-ecs/src/ECS.ts
+++ b/clients/client-ecs/src/ECS.ts
@@ -244,28 +244,28 @@ import { ECSClient } from "./ECSClient";

 /**
- * Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service that makes
Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service. It makes * it easy to run, stop, and manage Docker containers on a cluster. You can host your - * cluster on a serverless infrastructure that is managed by Amazon ECS by launching your + * cluster on a serverless infrastructure that's managed by Amazon ECS by launching your * services or tasks on Fargate. For more control, you can host your tasks on a cluster * of Amazon Elastic Compute Cloud (Amazon EC2) instances that you manage.
*Amazon ECS makes it easy to launch and stop container-based applications with simple API - * calls, allows you to get the state of your cluster from a centralized service, and gives - * you access to many familiar Amazon EC2 features.
+ * calls. This makes it easy to get the state of your cluster from a centralized service, + * and gives you access to many familiar Amazon EC2 features. *You can use Amazon ECS to schedule the placement of containers across your cluster based on - * your resource needs, isolation policies, and availability requirements. Amazon ECS eliminates - * the need for you to operate your own cluster management and configuration management - * systems or worry about scaling your management infrastructure.
+ * your resource needs, isolation policies, and availability requirements. With Amazon ECS, you + * don't need to operate your own cluster management and configuration management systems. + * You also don't need to worry about scaling your management infrastructure. */ export class ECS extends ECSClient { /** *Creates a new capacity provider. Capacity providers are associated with an Amazon ECS * cluster and are used in capacity provider strategies to facilitate cluster auto * scaling.
- *Only capacity providers using an Auto Scaling group can be created. Amazon ECS tasks on
- * Fargate use the FARGATE
and FARGATE_SPOT
capacity providers
- * which are already created and available to all accounts in Regions supported by
- * Fargate.
Only capacity providers that use an Auto Scaling group can be created. Amazon ECS tasks on
+ * Fargate use the FARGATE
and FARGATE_SPOT
capacity providers.
+ * These providers are available to all accounts in the Amazon Web Services Regions that Fargate
+ * supports.
CreateCluster
action.
* When you call the CreateCluster API operation, Amazon ECS attempts to - * create the Amazon ECS service-linked role for your account so that required resources in - * other Amazon Web Services services can be managed on your behalf. However, if the IAM user that - * makes the call does not have permissions to create the service-linked role, it is - * not created. For more information, see Using + * create the Amazon ECS service-linked role for your account. This is so that it can manage + * required resources in other Amazon Web Services services on your behalf. However, if the IAM user + * that makes the call doesn't have permissions to create the service-linked role, it + * isn't created. For more information, see Using * Service-Linked Roles for Amazon ECS in the * Amazon Elastic Container Service Developer Guide.
*Runs and maintains a desired number of tasks from a specified task definition. If the
- * number of tasks running in a service drops below the desiredCount
, Amazon ECS
- * runs another copy of the task in the specified cluster. To update an existing service,
- * see the UpdateService action.
Runs and maintains your desired number of tasks from a specified task definition. If
+ * the number of tasks running in a service drops below the desiredCount
,
+ * Amazon ECS runs another copy of the task in the specified cluster. To update an existing
+ * service, see the UpdateService action.
In addition to maintaining the desired count of tasks in your service, you can * optionally run your service behind one or more load balancers. The load balancers * distribute traffic across the tasks that are associated with the service. For more * information, see Service Load Balancing in the * Amazon Elastic Container Service Developer Guide.
- *Tasks for services that do not use a load balancer are considered
- * healthy if they're in the RUNNING
state. Tasks for services that
- * do use a load balancer are considered healthy if they're in the
- * RUNNING
state and the container instance that they're hosted on is
- * reported as healthy by the load balancer.
Tasks for services that don't use a load balancer are considered healthy if they're in
+ * the RUNNING
state. Tasks for services that use a load balancer are
+ * considered healthy if they're in the RUNNING
state and the container
+ * instance that they're hosted on is reported as healthy by the load balancer.
There are two service scheduler strategies available:
*
* REPLICA
- The replica scheduling strategy places and
- * maintains the desired number of tasks across your cluster. By default, the
+ * maintains your desired number of tasks across your cluster. By default, the
* service scheduler spreads tasks across Availability Zones. You can use task
* placement strategies and constraints to customize task placement decisions. For
* more information, see Service Scheduler Concepts in the
@@ -370,50 +369,51 @@ export class ECS extends ECSClient {
* DAEMON
- The daemon scheduling strategy deploys exactly one
* task on each active container instance that meets all of the task placement
* constraints that you specify in your cluster. The service scheduler also
- * evaluates the task placement constraints for running tasks and will stop tasks
- * that do not meet the placement constraints. When using this strategy, you don't
+ * evaluates the task placement constraints for running tasks. It also stops tasks
+ * that don't meet the placement constraints. When using this strategy, you don't
* need to specify a desired number of tasks, a task placement strategy, or use
* Service Auto Scaling policies. For more information, see Service Scheduler Concepts in the
* Amazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment
- * is triggered by changing properties, such as the task definition or the desired count of
- * a service, with an UpdateService operation. The default value for a
- * replica service for minimumHealthyPercent
is 100%. The default value for a
- * daemon service for minimumHealthyPercent
is 0%.
If a service is using the ECS
deployment controller, the minimum healthy
+ * is initiated by changing properties. For example, the deployment might be initiated by
+ * updating the task definition or your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for
+ * minimumHealthyPercent
is 100%. The default value for a daemon service
+ * for minimumHealthyPercent
is 0%.
If a service uses the ECS
deployment controller, the minimum healthy
* percent represents a lower limit on the number of tasks in a service that must remain in
- * the RUNNING
state during a deployment, as a percentage of the desired
- * number of tasks (rounded up to the nearest integer), and while any container instances
- * are in the DRAINING
state if the service contains tasks using the
- * EC2 launch type. This parameter enables you to deploy without using
- * additional cluster capacity. For example, if your service has a desired number of four
- * tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks
- * to free up cluster capacity before starting two new tasks. Tasks for services that
- * do not use a load balancer are considered healthy if they're in
- * the RUNNING
state. Tasks for services that do use a
- * load balancer are considered healthy if they're in the RUNNING
state and
- * they're reported as healthy by the load balancer. The default value for minimum healthy
- * percent is 100%.
If a service is using the ECS
deployment controller, the maximum percent parameter represents an upper limit on the
+ * the RUNNING
state during a deployment. Specifically, that limit is expressed as a
+ * percentage of your desired number of tasks (rounded up to the nearest integer). This
+ * happens when any of your container instances are in the DRAINING
state if
+ * the service contains tasks using the EC2 launch type. Using this
+ * parameter, you can deploy without using additional cluster capacity. For example, if you
+ * set your service to have a desired number of four tasks and a minimum healthy percent of
+ * 50%, the scheduler might stop two existing tasks to free up cluster capacity before
+ * starting two new tasks. If they're in the RUNNING
state, tasks for services
+ * that don't use a load balancer are considered healthy. If they're in the
+ * RUNNING
state and reported as healthy by the load balancer, tasks for
+ * services that do use a load balancer are considered healthy. The
+ * default value for minimum healthy percent is 100%.
If a service uses the ECS
deployment controller, the maximum percent parameter represents an upper limit on the
* number of tasks in a service that are allowed in the RUNNING
or
- * PENDING
state during a deployment, as a percentage of the desired
- * number of tasks (rounded down to the nearest integer), and while any container instances
- * are in the DRAINING
state if the service contains tasks using the
- * EC2 launch type. This parameter enables you to define the deployment batch
- * size. For example, if your service has a desired number of four tasks and a maximum
- * percent value of 200%, the scheduler may start four new tasks before stopping the four
- * older tasks (provided that the cluster resources required to do this are available). The
- * default value for maximum percent is 200%.
If a service is using either the CODE_DEPLOY
or EXTERNAL
+ * PENDING
state during a deployment. Specifically, that limit is expressed as a
+ * percentage of the desired number of tasks (rounded down to the nearest integer). This
+ * happens when any of your container instances are in the DRAINING
state if
+ * the service contains tasks using the EC2 launch type. Using this
+ * parameter, you can define the deployment batch size. For example, if your service has a
+ * desired number of four tasks and a maximum percent value of 200%, the scheduler may
+ * start four new tasks before stopping the four older tasks (provided that the cluster
+ * resources required to do this are available). The default value for maximum percent is
+ * 200%.
If a service uses either the CODE_DEPLOY
or EXTERNAL
* deployment controller types and tasks that use the EC2 launch type, the
* minimum healthy percent and maximum percent values are used only to define the lower and upper limit
- * on the number of the tasks in the service that remain in the RUNNING
state
- * while the container instances are in the DRAINING
state. If the tasks in
- * the service use the Fargate launch type, the minimum healthy percent and
- * maximum percent values aren't used, although they're currently visible when describing
- * your service.
RUNNING
state.
+ * This is while the container instances are in the DRAINING
state. If the
+ * tasks in the service use the Fargate launch type, the minimum healthy
+ * percent and maximum percent values aren't used. This is the case even if they're
+ * currently visible when describing your service.
* When creating a service that uses the EXTERNAL
deployment controller, you
* can specify only parameters that aren't controlled at the task set level. The only
* required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.
Determine which of the container instances in your cluster can support your - * service's task definition (for example, they have the required CPU, memory, - * ports, and container instance attributes).
+ *Determine which of the container instances in your cluster can support the + * task definition of your service. For example, they have the required CPU, + * memory, ports, and container instance attributes.
*By default, the service scheduler attempts to balance tasks across
- * Availability Zones in this manner (although you can choose a different placement
- * strategy) with the placementStrategy
parameter):
placementStrategy
+ * parameter.
* Sort the valid container instances, giving priority to instances that @@ -439,7 +440,7 @@ export class ECS extends ECSClient { *
Place the new service task on a valid container instance in an optimal - * Availability Zone (based on the previous steps), favoring container + * Availability Zone based on the previous steps, favoring container * instances with the fewest number of running tasks for this * service.
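For reference, a minimal sketch of a CreateService call that sets the deployment limits described above. The client and command are the real @aws-sdk/client-ecs exports; the region, cluster, service, and task-definition names are illustrative assumptions, not values from this diff:

```javascript
import { ECSClient, CreateServiceCommand } from "@aws-sdk/client-ecs";

const client = new ECSClient({ region: "us-east-1" });

// Illustrative names only. With desiredCount = 4, a 50% minimum healthy
// percent lets the scheduler stop two old tasks early, and a 200% maximum
// percent lets it start up to four replacement tasks during a deployment.
const { service } = await client.send(
  new CreateServiceCommand({
    cluster: "example-cluster",
    serviceName: "example-service",
    taskDefinition: "example-task:1",
    desiredCount: 4,
    deploymentConfiguration: { minimumHealthyPercent: 50, maximumPercent: 200 },
  })
);
console.log(service?.serviceArn);
```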
*Deletes the specified capacity provider.
*The FARGATE
and FARGATE_SPOT
capacity providers are
- * reserved and cannot be deleted. You can disassociate them from a cluster using
- * either the PutClusterCapacityProviders API or by deleting the
+ * reserved and can't be deleted. You can disassociate them from a cluster using either
+ * the PutClusterCapacityProviders API or by deleting the
* cluster.
Prior to a capacity provider being deleted, the capacity provider must be removed from
@@ -590,7 +591,7 @@ export class ECS extends ECSClient {
* strategy. When updating a service, the forceNewDeployment
option can be
* used to ensure that any tasks using the Amazon EC2 instance capacity provided by the capacity
* provider are transitioned to use the capacity from the remaining capacity providers.
- * Only capacity providers that are not associated with a cluster can be deleted. To remove
+ * Only capacity providers that aren't associated with a cluster can be deleted. To remove
* a capacity provider from a cluster, you can either use PutClusterCapacityProviders or delete the cluster.
Deletes the specified cluster. The cluster will transition to the
- * INACTIVE
state. Clusters with an INACTIVE
status may
- * remain discoverable in your account for a period of time. However, this behavior is
- * subject to change in the future, so you should not rely on INACTIVE
- * clusters persisting.
Deletes the specified cluster. The cluster transitions to the INACTIVE
+ * state. Clusters with an INACTIVE
status might remain discoverable in your
+ * account for a period of time. However, this behavior is subject to change in the future.
+ * We don't recommend that you rely on INACTIVE
clusters persisting.
You must deregister all container instances from this cluster before you may delete * it. You can list the container instances in a cluster with ListContainerInstances and deregister them with DeregisterContainerInstance.
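A sketch of the deletion order this implies — every container instance is deregistered before DeleteCluster is called. The commands are real @aws-sdk/client-ecs exports; the cluster name is hypothetical:

```javascript
import {
  ECSClient,
  ListContainerInstancesCommand,
  DeregisterContainerInstanceCommand,
  DeleteClusterCommand,
} from "@aws-sdk/client-ecs";

const client = new ECSClient({ region: "us-east-1" });
const cluster = "example-cluster"; // hypothetical name

// A cluster can't be deleted while container instances are registered.
const { containerInstanceArns = [] } = await client.send(
  new ListContainerInstancesCommand({ cluster })
);
for (const containerInstance of containerInstanceArns) {
  await client.send(
    new DeregisterContainerInstanceCommand({ cluster, containerInstance, force: true })
  );
}
await client.send(new DeleteClusterCommand({ cluster }));
```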
*/ @@ -663,7 +663,7 @@ export class ECS extends ECSClient { /** *Deletes a specified service within a cluster. You can delete a service if you have no * running tasks in it and the desired task count is zero. If the service is actively - * maintaining tasks, you cannot delete it, and you must update the service to a desired + * maintaining tasks, you can't delete it, and you must update the service to a desired * task count of zero. For more information, see UpdateService.
*When you delete a service, if there are still running tasks that require cleanup, @@ -749,15 +749,16 @@ export class ECS extends ECSClient { *
Deregisters an Amazon ECS container instance from the specified cluster. This instance is * no longer available to run tasks.
*If you intend to use the container instance for some other purpose after - * deregistration, you should stop all of the tasks running on the container instance - * before deregistration. That prevents any orphaned tasks from consuming resources.
- *Deregistering a container instance removes the instance from a cluster, but it does - * not terminate the EC2 instance. If you are finished using the instance, be sure to - * terminate it in the Amazon EC2 console to stop billing.
+ * deregistration, we recommend that you stop all of the tasks running on the container + * instance before deregistration. That prevents any orphaned tasks from consuming + * resources. + *Deregistering a container instance removes the instance from a cluster, but it doesn't + * terminate the EC2 instance. If you are finished using the instance, be sure to terminate + * it in the Amazon EC2 console to stop billing.
*If you terminate a running container instance, Amazon ECS automatically deregisters the * instance from your cluster (stopped container instances or instances with - * disconnected agents are not automatically deregistered when terminated).
+ * disconnected agents aren't automatically deregistered when terminated). *INACTIVE
task definition continue to run without disruption.
* Existing services that reference an INACTIVE
task definition can still
* scale up or down by modifying the service's desired count.
- * You cannot use an INACTIVE
task definition to run new tasks or create new
- * services, and you cannot update an existing service to reference an
- * INACTIVE
task definition. However, there may be up to a 10-minute
- * window following deregistration where these restrictions have not yet taken
- * effect.
You can't use an INACTIVE
task definition to run new tasks or create new
+ * services, and you can't update an existing service to reference an INACTIVE
+ * task definition. However, there may be up to a 10-minute window following deregistration
+ * where these restrictions have not yet taken effect.
At this time, INACTIVE
task definitions remain discoverable in your
- * account indefinitely. However, this behavior is subject to change in the future, so
- * you should not rely on INACTIVE
task definitions persisting beyond the
- * lifecycle of any associated tasks and services.
INACTIVE
task definitions persisting
+ * beyond the lifecycle of any associated tasks and services.
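As a small illustration (the family:revision value is hypothetical), deregistering a revision marks it INACTIVE without stopping tasks that already reference it:

```javascript
import { ECSClient, DeregisterTaskDefinitionCommand } from "@aws-sdk/client-ecs";

const client = new ECSClient({ region: "us-east-1" });

// The task definition is addressed as family:revision (or by full ARN).
const { taskDefinition } = await client.send(
  new DeregisterTaskDefinitionCommand({ taskDefinition: "example-task:3" })
);
console.log(taskDefinition?.status); // expected: "INACTIVE"
```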
* This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
*Returns an endpoint for - * the Amazon ECS agent to poll for updates.
+ *Returns an endpoint for the Amazon ECS agent to poll for updates.
*/ public discoverPollEndpoint( args: DiscoverPollEndpointCommandInput, @@ -1176,9 +1175,9 @@ export class ECS extends ECSClient { * When you specify a target type and cluster,ListAttributes
returns a list
* of attribute objects, one for each attribute on each resource. You can filter the list
* of results to a single attribute name to only return results that have that name. You
- * can also filter the results by attribute name and value, for example, to see which
- * container instances in a cluster are running a Linux AMI
- * (ecs.os-type=linux
).
+ * can also filter the results by attribute name and value. You can do this, for example,
+ * to see which container instances in a cluster are running a Linux AMI
+ * (ecs.os-type=linux
).
*/
public listAttributes(
args: ListAttributesCommandInput,
@@ -1336,12 +1335,12 @@ export class ECS extends ECSClient {
}
/**
- * Returns a list of task definition families that are registered to your account (which
- * may include task definition families that no longer have any ACTIVE
task
- * definition revisions).
You can filter out task definition families that do not contain any
- * ACTIVE
task definition revisions by setting the status
- * parameter to ACTIVE
. You can also filter the results with the
+ *
Returns a list of task definition families that are registered to your account. This
+ * list includes task definition families that no longer have any ACTIVE
task
+ * definition revisions.
You can filter out task definition families that don't contain any ACTIVE
+ * task definition revisions by setting the status
parameter to
+ * ACTIVE
. You can also filter the results with the
* familyPrefix
parameter.
Modifies an account setting. Account settings are set on a per-Region basis.
*If you change the account setting for the root user, the default settings for all of - * the IAM users and roles for which no individual account setting has been specified are - * reset. For more information, see Account + * the IAM users and roles that no individual account setting was specified are reset for. + * For more information, see Account * Settings in the Amazon Elastic Container Service Developer Guide.
*When serviceLongArnFormat
, taskLongArnFormat
, or
* containerInstanceLongArnFormat
are specified, the Amazon Resource Name
* (ARN) and resource ID format of the resource type for a specified IAM user, IAM role, or
* the root user for an account is affected. The opt-in and opt-out account setting must be
- * set for each Amazon ECS resource separately. The ARN and resource ID format of a resource
- * will be defined by the opt-in status of the IAM user or role that created the resource.
- * You must enable this setting to use Amazon ECS features such as resource tagging.
When awsvpcTrunking
is specified, the elastic network interface (ENI)
* limit for any new container instances that support the feature is changed. If
* awsvpcTrunking
is enabled, any new container instances that support the
@@ -1527,8 +1526,8 @@ export class ECS extends ECSClient {
}
/**
- *
Create or update an attribute on an Amazon ECS resource. If the attribute does not exist, - * it is created. If the attribute exists, its value is replaced with the specified value. + *
Create or update an attribute on an Amazon ECS resource. If the attribute doesn't exist, + * it's created. If the attribute exists, its value is replaced with the specified value. * To delete an attribute, use DeleteAttributes. For more information, * see Attributes in the * Amazon Elastic Container Service Developer Guide.
@@ -1568,14 +1567,14 @@ export class ECS extends ECSClient { *You must specify both the available capacity providers and a default capacity provider * strategy for the cluster. If the specified cluster has existing capacity providers * associated with it, you must specify all existing capacity providers in addition to any - * new ones you want to add. Any existing capacity providers associated with a cluster that - * are omitted from a PutClusterCapacityProviders API call will be - * disassociated with the cluster. You can only disassociate an existing capacity provider - * from a cluster if it's not being used by any existing tasks.
+ * new ones you want to add. Any existing capacity providers that are associated with a + * cluster that are omitted from a <a>PutClusterCapacityProviders</a> API call + * will be disassociated from the cluster. You can only disassociate an existing capacity + * provider from a cluster if it's not being used by any existing tasks. *<p>When creating a service or running a task on a cluster, if no capacity provider or
* launch type is specified, then the cluster's default capacity provider strategy is used.
- * It is recommended to define a default capacity provider strategy for your cluster,
- * however you may specify an empty array ([]
) to bypass defining a default
+ * We recommend that you define a default capacity provider strategy for your cluster.
+ * However, you must specify an empty array ([]
) to bypass defining a default
* strategy.
This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
*Registers an EC2 - * instance into the specified cluster. This instance becomes available to place containers - * on.
+ *Registers an EC2 instance into the specified cluster. This instance becomes available + * to place containers on.
*/ public registerContainerInstance( args: RegisterContainerInstanceCommandInput, @@ -1653,7 +1651,7 @@ export class ECS extends ECSClient { *You can specify an IAM role for your task with the taskRoleArn
parameter.
* When you specify an IAM role for a task, its containers can then use the latest versions
* of the CLI or SDKs to make API requests to the Amazon Web Services services that are specified in
- * the IAM policy associated with the role. For more information, see IAM
+ * the IAM policy that's associated with the role. For more information, see IAM
* Roles for Tasks in the Amazon Elastic Container Service Developer Guide.
You can specify a Docker networking mode for the containers in your task definition
* with the networkMode
parameter. The available network modes correspond to
@@ -1701,11 +1699,11 @@ export class ECS extends ECSClient {
* Amazon Elastic Container Service Developer Guide.
Alternatively, you can use StartTask to use your own scheduler or * place tasks manually on specific container instances.
- *The Amazon ECS API follows an eventual consistency model, due to the distributed nature of - * the system supporting the API. This means that the result of an API command you run that - * affects your Amazon ECS resources might not be immediately visible to all subsequent commands - * you run. Keep this in mind when you carry out an API command that immediately follows a - * previous API command.
+ *The Amazon ECS API follows an eventual consistency model. This is because the distributed + * nature of the system supporting the API. This means that the result of an API command + * you run that affects your Amazon ECS resources might not be immediately visible to all + * subsequent commands you run. Keep this in mind when you carry out an API command that + * immediately follows a previous API command.
*To manage eventual consistency, you can do the following:
*This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
*Sent to - * acknowledge that an attachment changed states.
+ *Sent to acknowledge that an attachment changed states.
*/ public submitAttachmentStateChanges( args: SubmitAttachmentStateChangesCommandInput, @@ -1855,8 +1852,7 @@ export class ECS extends ECSClient { *This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
*Sent to - * acknowledge that a container changed states.
+ *Sent to acknowledge that a container changed states.
*/ public submitContainerStateChange( args: SubmitContainerStateChangeCommandInput, @@ -1891,8 +1887,7 @@ export class ECS extends ECSClient { *This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
*Sent to acknowledge - * that a task changed states.
+ *Sent to acknowledge that a task changed states.
*/ public submitTaskStateChange( args: SubmitTaskStateChangeCommandInput, @@ -1925,8 +1920,8 @@ export class ECS extends ECSClient { /** *Associates the specified tags to a resource with the specified
- * resourceArn
. If existing tags on a resource are not specified in the
- * request parameters, they are not changed. When a resource is deleted, the tags
+ * resourceArn
. If existing tags on a resource aren't specified in the
+ * request parameters, they aren't changed. When a resource is deleted, the tags that are
* associated with that resource are deleted as well.
Updates the Amazon ECS container agent on a specified container instance. Updating the - * Amazon ECS container agent does not interrupt running tasks or services on the container + * Amazon ECS container agent doesn't interrupt running tasks or services on the container * instance. The process for updating the agent differs depending on whether your container * instance was launched with the Amazon ECS-optimized AMI or another operating system.
*The UpdateContainerAgent
API isn't supported for container instances
* using the Amazon ECS-optimized Amazon Linux 2 (arm64) AMI. To update the container agent,
- * you can update the ecs-init
package which will update the agent. For
- * more information, see Updating the
+ * you can update the ecs-init
package. This updates the agent. For more
+ * information, see Updating the
* Amazon ECS container agent in the
* Amazon Elastic Container Service Developer Guide.
A container instance cannot be changed to DRAINING
until it has
+ *
A container instance can't be changed to DRAINING
until it has
* reached an ACTIVE
status. If the instance is in any other status, an
* error will be received.
RUNNING
+ * load balancer are considered healthy if they're in the RUNNING
* state. Tasks for services that use a load balancer are considered healthy if
- * they are in the RUNNING
state and the container instance they are
+ * they're in the RUNNING
state and the container instance they're
* hosted on is reported as healthy by the load balancer.
* The maximumPercent
parameter represents an upper limit on the
- * number of running tasks during task replacement, which enables you to define the
+ * number of running tasks during task replacement. You can use this to define the
* replacement batch size. For example, if desiredCount
is four tasks,
* a maximum of 200% starts four new tasks before stopping the four tasks to be
* drained, provided that the cluster resources required to do this are available.
@@ -2171,7 +2166,7 @@ export class ECS extends ECSClient {
*
Any PENDING
or RUNNING
tasks that do not belong to a service
- * are not affected. You must wait for them to finish or stop them manually.
A container instance has completed draining when it has no more RUNNING
* tasks. You can verify this using ListTasks.
When a container instance has been drained, you can set a container instance to @@ -2222,12 +2217,12 @@ export class ECS extends ECSClient { * only the desired count, deployment configuration, task placement constraints and * strategies, and health check grace period can be updated using this API. If the network * configuration, platform version, or task definition need to be updated, a new CodeDeploy - * deployment should be created. For more information, see CreateDeployment in the CodeDeploy API Reference.
+ * deployment must be created. For more information, see <a href="https://docs.aws.amazon.com/codedeploy/latest/APIReference/API_CreateDeployment.html">CreateDeployment</a> in the <i>CodeDeploy API Reference</i>.</p> *<p>For services using an external deployment controller, you can update only the desired * count, task placement constraints and strategies, and health check grace period using * this API. If the launch type, load balancer, network configuration, platform version, or - * task definition need to be updated, you should create a new task set. For more - * information, see <a>CreateTaskSet</a>.</p>
+ * task definition need to be updated, create a new task set. For more information, see + * CreateTaskSet. *You can add to or subtract from the number of instantiations of a task definition in a
* service by specifying the cluster that the service is running in and a new
* desiredCount
parameter.
If your updated Docker image uses the same tag as what is in the existing task
- * definition for your service (for example, my_image:latest
), you do not
+ * definition for your service (for example, my_image:latest
), you don't
* need to create a new revision of your task definition. You can update the service
* using the forceNewDeployment
option. The new tasks launched by the
* deployment pull the current image/tag combination from your repository when they
@@ -2253,15 +2248,15 @@ export class ECS extends ECSClient {
* desiredCount
temporarily during a deployment. For example, if
* desiredCount
is four tasks, a minimum of 50% allows the
* scheduler to stop two existing tasks before starting two new tasks. Tasks for
- * services that do not use a load balancer are considered healthy if they are in
- * the RUNNING
state. Tasks for services that use a load balancer are
- * considered healthy if they are in the RUNNING
state and the
- * container instance they are hosted on is reported as healthy by the load
+ * services that don't use a load balancer are considered healthy if they're in the
+ * RUNNING
state. Tasks for services that use a load balancer are
+ * considered healthy if they're in the RUNNING
state and the
+ * container instance they're hosted on is reported as healthy by the load
* balancer.
The maximumPercent
parameter represents an upper limit on the
- * number of running tasks during a deployment, which enables you to define the
+ * number of running tasks during a deployment. You can use it to define the
* deployment batch size. For example, if desiredCount
is four tasks,
* a maximum of 200% starts four new tasks before stopping the four older tasks
* (provided that the cluster resources required to do this are available).
When UpdateService stops a task during a deployment, the equivalent
* of docker stop
is issued to the containers running in the task. This
- * results in a SIGTERM
and a 30-second timeout, after which
+ * results in a SIGTERM
and a 30-second timeout. After this,
* SIGKILL
is sent and the containers are forcibly stopped. If the
* container handles the SIGTERM
gracefully and exits within 30 seconds from
* receiving it, no SIGKILL
is sent.
When the service scheduler launches new tasks, it determines task placement in your - * cluster with the following logic:
+ * cluster with the following logic. *Determine which of the container instances in your cluster can support your - * service's task definition (for example, they have the required CPU, memory, - * ports, and container instance attributes).
+ * service's task definition. For example, they have the required CPU, memory, + * ports, and container instance attributes. *By default, the service scheduler attempts to balance tasks across - * Availability Zones in this manner (although you can choose a different placement - * strategy):
+ * Availability Zones in this manner even though you can choose a different + * placement strategy. *Sort the valid container instances by the fewest number of running
diff --git a/clients/client-ecs/src/ECSClient.ts b/clients/client-ecs/src/ECSClient.ts
index 272faee3f9f0b..26dad2c86799a 100644
--- a/clients/client-ecs/src/ECSClient.ts
+++ b/clients/client-ecs/src/ECSClient.ts
@@ -439,18 +439,18 @@ export interface ECSClientResolvedConfig extends ECSClientResolvedConfigType {}
/**
*
Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service that makes + *
Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service. It makes * it easy to run, stop, and manage Docker containers on a cluster. You can host your - * cluster on a serverless infrastructure that is managed by Amazon ECS by launching your + * cluster on a serverless infrastructure that's managed by Amazon ECS by launching your * services or tasks on Fargate. For more control, you can host your tasks on a cluster * of Amazon Elastic Compute Cloud (Amazon EC2) instances that you manage.
*Amazon ECS makes it easy to launch and stop container-based applications with simple API - * calls, allows you to get the state of your cluster from a centralized service, and gives - * you access to many familiar Amazon EC2 features.
+ * calls. This makes it easy to get the state of your cluster from a centralized service, + * and gives you access to many familiar Amazon EC2 features. *You can use Amazon ECS to schedule the placement of containers across your cluster based on - * your resource needs, isolation policies, and availability requirements. Amazon ECS eliminates - * the need for you to operate your own cluster management and configuration management - * systems or worry about scaling your management infrastructure.
+ * your resource needs, isolation policies, and availability requirements. With Amazon ECS, you + * don't need to operate your own cluster management and configuration management systems. + * You also don't need to worry about scaling your management infrastructure. */ export class ECSClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-ecs/src/commands/CreateCapacityProviderCommand.ts b/clients/client-ecs/src/commands/CreateCapacityProviderCommand.ts index 2d961e61adcc6..2711ab3b0e82c 100644 --- a/clients/client-ecs/src/commands/CreateCapacityProviderCommand.ts +++ b/clients/client-ecs/src/commands/CreateCapacityProviderCommand.ts @@ -25,10 +25,10 @@ export interface CreateCapacityProviderCommandOutput extends CreateCapacityProvi *Creates a new capacity provider. Capacity providers are associated with an Amazon ECS * cluster and are used in capacity provider strategies to facilitate cluster auto * scaling.
- *Only capacity providers using an Auto Scaling group can be created. Amazon ECS tasks on
- * Fargate use the FARGATE
and FARGATE_SPOT
capacity providers
- * which are already created and available to all accounts in Regions supported by
- * Fargate.
Only capacity providers that use an Auto Scaling group can be created. Amazon ECS tasks on
+ * Fargate use the FARGATE
and FARGATE_SPOT
capacity providers.
+ * These providers are available to all accounts in the Amazon Web Services Regions that Fargate
+ * supports.
CreateCluster
action.
* When you call the CreateCluster API operation, Amazon ECS attempts to - * create the Amazon ECS service-linked role for your account so that required resources in - * other Amazon Web Services services can be managed on your behalf. However, if the IAM user that - * makes the call does not have permissions to create the service-linked role, it is - * not created. For more information, see Using + * create the Amazon ECS service-linked role for your account. This is so that it can manage + * required resources in other Amazon Web Services services on your behalf. However, if the IAM user + * that makes the call doesn't have permissions to create the service-linked role, it + * isn't created. For more information, see Using * Service-Linked Roles for Amazon ECS in the * Amazon Elastic Container Service Developer Guide.
*Runs and maintains a desired number of tasks from a specified task definition. If the
- * number of tasks running in a service drops below the desiredCount
, Amazon ECS
- * runs another copy of the task in the specified cluster. To update an existing service,
- * see the UpdateService action.
Runs and maintains your desired number of tasks from a specified task definition. If
+ * the number of tasks running in a service drops below the desiredCount
,
+ * Amazon ECS runs another copy of the task in the specified cluster. To update an existing
+ * service, see the UpdateService action.
In addition to maintaining the desired count of tasks in your service, you can * optionally run your service behind one or more load balancers. The load balancers * distribute traffic across the tasks that are associated with the service. For more * information, see Service Load Balancing in the * Amazon Elastic Container Service Developer Guide.
- *Tasks for services that do not use a load balancer are considered
- * healthy if they're in the RUNNING
state. Tasks for services that
- * do use a load balancer are considered healthy if they're in the
- * RUNNING
state and the container instance that they're hosted on is
- * reported as healthy by the load balancer.
Tasks for services that don't use a load balancer are considered healthy if they're in
+ * the RUNNING
state. Tasks for services that use a load balancer are
+ * considered healthy if they're in the RUNNING
state and the container
+ * instance that they're hosted on is reported as healthy by the load balancer.
There are two service scheduler strategies available:
*
* REPLICA
- The replica scheduling strategy places and
- * maintains the desired number of tasks across your cluster. By default, the
+ * maintains your desired number of tasks across your cluster. By default, the
* service scheduler spreads tasks across Availability Zones. You can use task
* placement strategies and constraints to customize task placement decisions. For
* more information, see Service Scheduler Concepts in the
@@ -52,50 +51,51 @@ export interface CreateServiceCommandOutput extends CreateServiceResponse, __Met
* DAEMON
- The daemon scheduling strategy deploys exactly one
* task on each active container instance that meets all of the task placement
* constraints that you specify in your cluster. The service scheduler also
- * evaluates the task placement constraints for running tasks and will stop tasks
- * that do not meet the placement constraints. When using this strategy, you don't
+ * evaluates the task placement constraints for running tasks. It also stops tasks
+ * that don't meet the placement constraints. When using this strategy, you don't
* need to specify a desired number of tasks, a task placement strategy, or use
* Service Auto Scaling policies. For more information, see Service Scheduler Concepts in the
* Amazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment
- * is triggered by changing properties, such as the task definition or the desired count of
- * a service, with an UpdateService operation. The default value for a
- * replica service for minimumHealthyPercent
is 100%. The default value for a
- * daemon service for minimumHealthyPercent
is 0%.
If a service is using the ECS
deployment controller, the minimum healthy
+ * is initiated by changing properties. For example, the deployment might be initiated by
+ * the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for
+ * minimumHealthyPercent
is 100%. The default value for a daemon service
+ * for minimumHealthyPercent
is 0%.
If a service uses the ECS
deployment controller, the minimum healthy
* percent represents a lower limit on the number of tasks in a service that must remain in
- * the RUNNING
state during a deployment, as a percentage of the desired
- * number of tasks (rounded up to the nearest integer), and while any container instances
- * are in the DRAINING
state if the service contains tasks using the
- * EC2 launch type. This parameter enables you to deploy without using
- * additional cluster capacity. For example, if your service has a desired number of four
- * tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks
- * to free up cluster capacity before starting two new tasks. Tasks for services that
- * do not use a load balancer are considered healthy if they're in
- * the RUNNING
state. Tasks for services that do use a
- * load balancer are considered healthy if they're in the RUNNING
state and
- * they're reported as healthy by the load balancer. The default value for minimum healthy
- * percent is 100%.
If a service is using the ECS
deployment controller, the maximum percent parameter represents an upper limit on the
+ * the RUNNING
state during a deployment. Specifically, that limit is expressed as a
+ * percentage of your desired number of tasks (rounded up to the nearest integer). This
+ * happens when any of your container instances are in the DRAINING
state if
+ * the service contains tasks using the EC2 launch type. Using this
+ * parameter, you can deploy without using additional cluster capacity. For example, if you
+ * set your service to have a desired number of four tasks and a minimum healthy percent of
+ * 50%, the scheduler might stop two existing tasks to free up cluster capacity before
+ * starting two new tasks. If they're in the RUNNING
state, tasks for services
+ * that don't use a load balancer are considered healthy. If they're in the
+ * RUNNING
state and reported as healthy by the load balancer, tasks for
+ * services that do use a load balancer are considered healthy. The
+ * default value for minimum healthy percent is 100%.
If a service uses the ECS
deployment controller, the maximum percent parameter represents an upper limit on the
* number of tasks in a service that are allowed in the RUNNING
or
- * PENDING
state during a deployment, as a percentage of the desired
- * number of tasks (rounded down to the nearest integer), and while any container instances
- * are in the DRAINING
state if the service contains tasks using the
- * EC2 launch type. This parameter enables you to define the deployment batch
- * size. For example, if your service has a desired number of four tasks and a maximum
- * percent value of 200%, the scheduler may start four new tasks before stopping the four
- * older tasks (provided that the cluster resources required to do this are available). The
- * default value for maximum percent is 200%.
If a service is using either the CODE_DEPLOY
or EXTERNAL
+ * PENDING
state during a deployment. Specifically, that limit is expressed as a
+ * percentage of the desired number of tasks (rounded down to the nearest integer). This
+ * happens when any of your container instances are in the DRAINING
state if
+ * the service contains tasks using the EC2 launch type. Using this
+ * parameter, you can define the deployment batch size. For example, if your service has a
+ * desired number of four tasks and a maximum percent value of 200%, the scheduler may
+ * start four new tasks before stopping the four older tasks (provided that the cluster
+ * resources required to do this are available). The default value for maximum percent is
+ * 200%.
If a service uses either the CODE_DEPLOY
or EXTERNAL
* deployment controller types and tasks that use the EC2 launch type, the
* minimum healthy percent and maximum percent values are used only to define the lower and upper limit
- * on the number of the tasks in the service that remain in the RUNNING
state
- * while the container instances are in the DRAINING
state. If the tasks in
- * the service use the Fargate launch type, the minimum healthy percent and
- * maximum percent values aren't used, although they're currently visible when describing
- * your service.
RUNNING
state.
+ * This is while the container instances are in the DRAINING
state. If the
+ * tasks in the service use the Fargate launch type, the minimum healthy
+ * percent and maximum percent values aren't used. This is the case even if they're
+ * currently visible when describing your service.
* When creating a service that uses the EXTERNAL
deployment controller, you
* can specify only parameters that aren't controlled at the task set level. The only
* required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS Deployment Types in the Amazon Elastic Container Service Developer Guide.
Determine which of the container instances in your cluster can support your - * service's task definition (for example, they have the required CPU, memory, - * ports, and container instance attributes).
+ *Determine which of the container instances in your cluster can support the + * task definition of your service. For example, they have the required CPU, + * memory, ports, and container instance attributes.
*By default, the service scheduler attempts to balance tasks across
- * Availability Zones in this manner (although you can choose a different placement
- * strategy) with the placementStrategy
parameter):
placementStrategy
+ * parameter.
* Sort the valid container instances, giving priority to instances that @@ -121,7 +122,7 @@ export interface CreateServiceCommandOutput extends CreateServiceResponse, __Met *
Place the new service task on a valid container instance in an optimal - * Availability Zone (based on the previous steps), favoring container + * Availability Zone based on the previous steps, favoring container * instances with the fewest number of running tasks for this * service.
*Deletes the specified capacity provider.
*The FARGATE
and FARGATE_SPOT
capacity providers are
- * reserved and cannot be deleted. You can disassociate them from a cluster using
- * either the PutClusterCapacityProviders API or by deleting the
+ * reserved and can't be deleted. You can disassociate them from a cluster using either
+ * the PutClusterCapacityProviders API or by deleting the
* cluster.
Prior to a capacity provider being deleted, the capacity provider must be removed from
@@ -35,7 +35,7 @@ export interface DeleteCapacityProviderCommandOutput extends DeleteCapacityProvi
* strategy. When updating a service, the forceNewDeployment
option can be
* used to ensure that any tasks using the Amazon EC2 instance capacity provided by the capacity
* provider are transitioned to use the capacity from the remaining capacity providers.
- * Only capacity providers that are not associated with a cluster can be deleted. To remove
+ * Only capacity providers that aren't associated with a cluster can be deleted. To remove
* a capacity provider from a cluster, you can either use PutClusterCapacityProviders or delete the cluster.
Deletes the specified cluster. The cluster will transition to the
- * INACTIVE
state. Clusters with an INACTIVE
status may
- * remain discoverable in your account for a period of time. However, this behavior is
- * subject to change in the future, so you should not rely on INACTIVE
- * clusters persisting.
Deletes the specified cluster. The cluster transitions to the INACTIVE
+ * state. Clusters with an INACTIVE
status might remain discoverable in your
+ * account for a period of time. However, this behavior is subject to change in the future.
+ * We don't recommend that you rely on INACTIVE
clusters persisting.
You must deregister all container instances from this cluster before you may delete * it. You can list the container instances in a cluster with ListContainerInstances and deregister them with DeregisterContainerInstance.
* @example diff --git a/clients/client-ecs/src/commands/DeleteServiceCommand.ts b/clients/client-ecs/src/commands/DeleteServiceCommand.ts index fa757b16bba69..e930b52a234e2 100644 --- a/clients/client-ecs/src/commands/DeleteServiceCommand.ts +++ b/clients/client-ecs/src/commands/DeleteServiceCommand.ts @@ -24,7 +24,7 @@ export interface DeleteServiceCommandOutput extends DeleteServiceResponse, __Met /** *Deletes a specified service within a cluster. You can delete a service if you have no * running tasks in it and the desired task count is zero. If the service is actively - * maintaining tasks, you cannot delete it, and you must update the service to a desired + * maintaining tasks, you can't delete it, and you must update the service to a desired * task count of zero. For more information, see UpdateService.
*When you delete a service, if there are still running tasks that require cleanup, diff --git a/clients/client-ecs/src/commands/DeregisterContainerInstanceCommand.ts b/clients/client-ecs/src/commands/DeregisterContainerInstanceCommand.ts index 97dba09a59b4f..31c1a35d9c4a0 100644 --- a/clients/client-ecs/src/commands/DeregisterContainerInstanceCommand.ts +++ b/clients/client-ecs/src/commands/DeregisterContainerInstanceCommand.ts @@ -27,15 +27,16 @@ export interface DeregisterContainerInstanceCommandOutput *
Deregisters an Amazon ECS container instance from the specified cluster. This instance is * no longer available to run tasks.
*If you intend to use the container instance for some other purpose after - * deregistration, you should stop all of the tasks running on the container instance - * before deregistration. That prevents any orphaned tasks from consuming resources.
- *Deregistering a container instance removes the instance from a cluster, but it does - * not terminate the EC2 instance. If you are finished using the instance, be sure to - * terminate it in the Amazon EC2 console to stop billing.
+ * deregistration, we recommend that you stop all of the tasks running on the container + * instance before deregistration. That prevents any orphaned tasks from consuming + * resources. + *Deregistering a container instance removes the instance from a cluster, but it doesn't + * terminate the EC2 instance. If you are finished using the instance, be sure to terminate + * it in the Amazon EC2 console to stop billing.
*If you terminate a running container instance, Amazon ECS automatically deregisters the * instance from your cluster (stopped container instances or instances with - * disconnected agents are not automatically deregistered when terminated).
+ * disconnected agents aren't automatically deregistered when terminated). *INACTIVE
task definition continue to run without disruption.
* Existing services that reference an INACTIVE
task definition can still
* scale up or down by modifying the service's desired count.
- * You cannot use an INACTIVE
task definition to run new tasks or create new
- * services, and you cannot update an existing service to reference an
- * INACTIVE
task definition. However, there may be up to a 10-minute
- * window following deregistration where these restrictions have not yet taken
- * effect.
You can't use an INACTIVE
task definition to run new tasks or create new
+ * services, and you can't update an existing service to reference an INACTIVE
+ * task definition. However, there may be up to a 10-minute window following deregistration
+ * where these restrictions have not yet taken effect.
At this time, INACTIVE
task definitions remain discoverable in your
- * account indefinitely. However, this behavior is subject to change in the future, so
- * you should not rely on INACTIVE
task definitions persisting beyond the
- * lifecycle of any associated tasks and services.
INACTIVE
task definitions persisting
+ * beyond the lifecycle of any associated tasks and services.
* This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
*Returns an endpoint for - * the Amazon ECS agent to poll for updates.
+ *Returns an endpoint for the Amazon ECS agent to poll for updates.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ecs/src/commands/ListAttributesCommand.ts b/clients/client-ecs/src/commands/ListAttributesCommand.ts index 7df9700a6c7dc..23a366f9e487f 100644 --- a/clients/client-ecs/src/commands/ListAttributesCommand.ts +++ b/clients/client-ecs/src/commands/ListAttributesCommand.ts @@ -26,9 +26,9 @@ export interface ListAttributesCommandOutput extends ListAttributesResponse, __M * When you specify a target type and cluster,ListAttributes
returns a list
* of attribute objects, one for each attribute on each resource. You can filter the list
* of results to a single attribute name to only return results that have that name. You
- * can also filter the results by attribute name and value, for example, to see which
- * container instances in a cluster are running a Linux AMI
- * (ecs.os-type=linux
).
+ * can also filter the results by attribute name and value. You can do this, for example,
+ * to see which container instances in a cluster are running a Linux AMI
+ * (ecs.os-type=linux
).
* @example
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
diff --git a/clients/client-ecs/src/commands/ListTaskDefinitionFamiliesCommand.ts b/clients/client-ecs/src/commands/ListTaskDefinitionFamiliesCommand.ts
index 43e6de26ded6c..772e988ac8409 100644
--- a/clients/client-ecs/src/commands/ListTaskDefinitionFamiliesCommand.ts
+++ b/clients/client-ecs/src/commands/ListTaskDefinitionFamiliesCommand.ts
@@ -22,12 +22,12 @@ export interface ListTaskDefinitionFamiliesCommandInput extends ListTaskDefiniti
export interface ListTaskDefinitionFamiliesCommandOutput extends ListTaskDefinitionFamiliesResponse, __MetadataBearer {}
/**
- * Returns a list of task definition families that are registered to your account (which
- * may include task definition families that no longer have any ACTIVE
task
- * definition revisions).
You can filter out task definition families that do not contain any
- * ACTIVE
task definition revisions by setting the status
- * parameter to ACTIVE
. You can also filter the results with the
+ *
Returns a list of task definition families that are registered to your account. This
+ * list includes task definition families that no longer have any ACTIVE
task
+ * definition revisions.
You can filter out task definition families that don't contain any ACTIVE
+ * task definition revisions by setting the status
parameter to
+ * ACTIVE
. You can also filter the results with the
* familyPrefix
parameter.
Modifies an account setting. Account settings are set on a per-Region basis.
*If you change the account setting for the root user, the default settings for all of - * the IAM users and roles for which no individual account setting has been specified are - * reset. For more information, see Account + * the IAM users and roles that no individual account setting was specified are reset for. + * For more information, see Account * Settings in the Amazon Elastic Container Service Developer Guide.
*When serviceLongArnFormat
, taskLongArnFormat
, or
* containerInstanceLongArnFormat
are specified, the Amazon Resource Name
* (ARN) and resource ID format of the resource type for a specified IAM user, IAM role, or
* the root user for an account is affected. The opt-in and opt-out account setting must be
- * set for each Amazon ECS resource separately. The ARN and resource ID format of a resource
- * will be defined by the opt-in status of the IAM user or role that created the resource.
- * You must enable this setting to use Amazon ECS features such as resource tagging.
 When awsvpcTrunking is specified, the elastic network interface (ENI)
 * limit for any new container instances that support the feature is changed. If
 * awsvpcTrunking is enabled, any new container instances that support the
diff --git a/clients/client-ecs/src/commands/PutAttributesCommand.ts b/clients/client-ecs/src/commands/PutAttributesCommand.ts
index 8cae97fecdd4c..2573a449abcad 100644
--- a/clients/client-ecs/src/commands/PutAttributesCommand.ts
+++ b/clients/client-ecs/src/commands/PutAttributesCommand.ts
@@ -22,8 +22,8 @@ export interface PutAttributesCommandInput extends PutAttributesRequest {}
export interface PutAttributesCommandOutput extends PutAttributesResponse, __MetadataBearer {}
/**
- *Create or update an attribute on an Amazon ECS resource. If the attribute does not exist,
- * it is created. If the attribute exists, its value is replaced with the specified value.
+ *Create or update an attribute on an Amazon ECS resource. If the attribute doesn't exist,
+ * it's created. If the attribute exists, its value is replaced with the specified value.
 * To delete an attribute, use DeleteAttributes. For more information,
 * see Attributes in the
 * Amazon Elastic Container Service Developer Guide.
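A hedged example of the call described above (cluster name, attribute, and target ID are placeholder values):

```javascript
import { ECSClient, PutAttributesCommand } from "@aws-sdk/client-ecs";

const client = new ECSClient({ region: "us-east-1" });
// Create or overwrite a custom "stack" attribute on a container instance.
await client.send(
  new PutAttributesCommand({
    cluster: "default",
    attributes: [
      {
        name: "stack",
        value: "production",
        targetType: "container-instance",
        targetId: "arn:aws:ecs:us-east-1:111122223333:container-instance/default/EXAMPLE",
      },
    ],
  })
);
```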
diff --git a/clients/client-ecs/src/commands/PutClusterCapacityProvidersCommand.ts b/clients/client-ecs/src/commands/PutClusterCapacityProvidersCommand.ts
index d74e40a76b4d4..6a34196a1a1e4 100644
--- a/clients/client-ecs/src/commands/PutClusterCapacityProvidersCommand.ts
+++ b/clients/client-ecs/src/commands/PutClusterCapacityProvidersCommand.ts
@@ -29,14 +29,14 @@ export interface PutClusterCapacityProvidersCommandOutput
 *You must specify both the available capacity providers and a default capacity provider
 * strategy for the cluster. If the specified cluster has existing capacity providers
 * associated with it, you must specify all existing capacity providers in addition to any
- * new ones you want to add. Any existing capacity providers associated with a cluster that
- * are omitted from a PutClusterCapacityProviders API call will be
- * disassociated with the cluster. You can only disassociate an existing capacity provider
- * from a cluster if it's not being used by any existing tasks.
+ * new ones you want to add. Any existing capacity providers that are associated with a
+ * cluster that are omitted from a PutClusterCapacityProviders API call
+ * will be disassociated with the cluster. You can only disassociate an existing capacity
+ * provider from a cluster if it's not being used by any existing tasks.
 *When creating a service or running a task on a cluster, if no capacity provider or
* launch type is specified, then the cluster's default capacity provider strategy is used.
- * It is recommended to define a default capacity provider strategy for your cluster,
- * however you may specify an empty array ([]) to bypass defining a default
+ * We recommend that you define a default capacity provider strategy for your cluster.
+ * However, you must specify an empty array ([]) to bypass defining a default
 * strategy.
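A minimal sketch of the call, assuming the Fargate capacity providers (cluster name and strategy are illustrative):

```javascript
import { ECSClient, PutClusterCapacityProvidersCommand } from "@aws-sdk/client-ecs";

const client = new ECSClient({ region: "us-east-1" });
await client.send(
  new PutClusterCapacityProvidersCommand({
    cluster: "default",
    capacityProviders: ["FARGATE", "FARGATE_SPOT"],
    // Pass [] here instead to bypass defining a default strategy.
    defaultCapacityProviderStrategy: [{ capacityProvider: "FARGATE", weight: 1 }],
  })
);
```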
This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
- *Registers an EC2
- * instance into the specified cluster. This instance becomes available to place containers
- * on.
+ *Registers an EC2 instance into the specified cluster. This instance becomes available
+ * to place containers on.
* @example
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
diff --git a/clients/client-ecs/src/commands/RegisterTaskDefinitionCommand.ts b/clients/client-ecs/src/commands/RegisterTaskDefinitionCommand.ts
index 211f7abb4fee6..8aa6d62d9cdf9 100644
--- a/clients/client-ecs/src/commands/RegisterTaskDefinitionCommand.ts
+++ b/clients/client-ecs/src/commands/RegisterTaskDefinitionCommand.ts
@@ -30,7 +30,7 @@ export interface RegisterTaskDefinitionCommandOutput extends RegisterTaskDefinitionResponse, __MetadataBearer {}
 *You can specify an IAM role for your task with the taskRoleArn parameter.
* When you specify an IAM role for a task, its containers can then use the latest versions
* of the CLI or SDKs to make API requests to the Amazon Web Services services that are specified in
- * the IAM policy associated with the role. For more information, see IAM
+ * the IAM policy that's associated with the role. For more information, see IAM
* Roles for Tasks in the Amazon Elastic Container Service Developer Guide.
You can specify a Docker networking mode for the containers in your task definition
 * with the networkMode parameter. The available network modes correspond to
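A hedged sketch of registering a task definition with a task role and network mode (the family, role ARN, and image are assumptions):

```javascript
import { ECSClient, RegisterTaskDefinitionCommand } from "@aws-sdk/client-ecs";

const client = new ECSClient({ region: "us-east-1" });
await client.send(
  new RegisterTaskDefinitionCommand({
    family: "my-app",
    taskRoleArn: "arn:aws:iam::111122223333:role/myTaskRole",
    networkMode: "awsvpc",
    containerDefinitions: [
      { name: "web", image: "public.ecr.aws/nginx/nginx:latest", memory: 512 },
    ],
  })
);
```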
diff --git a/clients/client-ecs/src/commands/RunTaskCommand.ts b/clients/client-ecs/src/commands/RunTaskCommand.ts
index b7dc61faebcc8..00e86702c4fd0 100644
--- a/clients/client-ecs/src/commands/RunTaskCommand.ts
+++ b/clients/client-ecs/src/commands/RunTaskCommand.ts
@@ -26,11 +26,11 @@ export interface RunTaskCommandOutput extends RunTaskResponse, __MetadataBearer
* Amazon Elastic Container Service Developer Guide.
 Alternatively, you can use StartTask to use your own scheduler or
 * place tasks manually on specific container instances.
- *The Amazon ECS API follows an eventual consistency model, due to the distributed nature of
- * the system supporting the API. This means that the result of an API command you run that
- * affects your Amazon ECS resources might not be immediately visible to all subsequent commands
- * you run. Keep this in mind when you carry out an API command that immediately follows a
- * previous API command.
+ *The Amazon ECS API follows an eventual consistency model. This is because of the
+ * distributed nature of the system supporting the API. This means that the result of an
+ * API command you run that affects your Amazon ECS resources might not be immediately
+ * visible to all subsequent commands you run. Keep this in mind when you carry out an API
+ * command that immediately follows a previous API command.
*To manage eventual consistency, you can do the following:
*This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
- *Sent to
- * acknowledge that an attachment changed states.
+ *Sent to acknowledge that an attachment changed states.
* @example
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
diff --git a/clients/client-ecs/src/commands/SubmitContainerStateChangeCommand.ts b/clients/client-ecs/src/commands/SubmitContainerStateChangeCommand.ts
index ff8c3cc6b7153..4b762cf108363 100644
--- a/clients/client-ecs/src/commands/SubmitContainerStateChangeCommand.ts
+++ b/clients/client-ecs/src/commands/SubmitContainerStateChangeCommand.ts
@@ -25,8 +25,7 @@ export interface SubmitContainerStateChangeCommandOutput extends SubmitContainerStateChangeResponse, __MetadataBearer {}
 *This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
- *Sent to
- * acknowledge that a container changed states.
+ *Sent to acknowledge that a container changed states.
* @example
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
diff --git a/clients/client-ecs/src/commands/SubmitTaskStateChangeCommand.ts b/clients/client-ecs/src/commands/SubmitTaskStateChangeCommand.ts
index df98efdaec06f..f523a98689bee 100644
--- a/clients/client-ecs/src/commands/SubmitTaskStateChangeCommand.ts
+++ b/clients/client-ecs/src/commands/SubmitTaskStateChangeCommand.ts
@@ -25,8 +25,7 @@ export interface SubmitTaskStateChangeCommandOutput extends SubmitTaskStateChangeResponse, __MetadataBearer {}
 *This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
- *Sent to acknowledge
- * that a task changed states.
+ *Sent to acknowledge that a task changed states.
* @example
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
diff --git a/clients/client-ecs/src/commands/TagResourceCommand.ts b/clients/client-ecs/src/commands/TagResourceCommand.ts
index b2018c03d956d..150f7361f52d8 100644
--- a/clients/client-ecs/src/commands/TagResourceCommand.ts
+++ b/clients/client-ecs/src/commands/TagResourceCommand.ts
@@ -23,8 +23,8 @@ export interface TagResourceCommandOutput extends TagResourceResponse, __MetadataBearer {}
/**
 *Associates the specified tags to a resource with the specified
- * resourceArn. If existing tags on a resource are not specified in the
- * request parameters, they are not changed. When a resource is deleted, the tags
+ * resourceArn. If existing tags on a resource aren't specified in the
+ * request parameters, they aren't changed. When a resource is deleted, the tags that are
* associated with that resource are deleted as well.
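For illustration, tagging a service with the standard v3 client (the ARN and tag values are placeholders):

```javascript
import { ECSClient, TagResourceCommand } from "@aws-sdk/client-ecs";

const client = new ECSClient({ region: "us-east-1" });
await client.send(
  new TagResourceCommand({
    resourceArn: "arn:aws:ecs:us-east-1:111122223333:service/default/my-service",
    tags: [{ key: "team", value: "devops" }],
  })
);
```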
 Updates the Amazon ECS container agent on a specified container instance. Updating the
- * Amazon ECS container agent does not interrupt running tasks or services on the container
+ * Amazon ECS container agent doesn't interrupt running tasks or services on the container
 * instance. The process for updating the agent differs depending on whether your container
 * instance was launched with the Amazon ECS-optimized AMI or another operating system.
*The UpdateContainerAgent
API isn't supported for container instances
* using the Amazon ECS-optimized Amazon Linux 2 (arm64) AMI. To update the container agent,
- * you can update the ecs-init package which will update the agent. For
- * more information, see Updating the
+ * you can update the ecs-init package. This updates the agent. For more
+ * information, see Updating the
* Amazon ECS container agent in the
* Amazon Elastic Container Service Developer Guide.
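A minimal sketch of triggering an agent update (the container instance ARN is a placeholder):

```javascript
import { ECSClient, UpdateContainerAgentCommand } from "@aws-sdk/client-ecs";

const client = new ECSClient({ region: "us-east-1" });
await client.send(
  new UpdateContainerAgentCommand({
    cluster: "default",
    containerInstance:
      "arn:aws:ecs:us-east-1:111122223333:container-instance/default/EXAMPLE",
  })
);
```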
- *A container instance cannot be changed to DRAINING until it has
+ *A container instance can't be changed to DRAINING until it has
 * reached an ACTIVE status. If the instance is in any other status, an
 * error will be received.
- * load balancer are considered healthy if they are in the RUNNING
+ * load balancer are considered healthy if they're in the RUNNING
 * state. Tasks for services that use a load balancer are considered healthy if
- * they are in the RUNNING state and the container instance they are
+ * they're in the RUNNING state and the container instance they're
 * hosted on is reported as healthy by the load balancer.
 * The maximumPercent parameter represents an upper limit on the
- * number of running tasks during task replacement, which enables you to define the
+ * number of running tasks during task replacement. You can use this to define the
 * replacement batch size. For example, if desiredCount is four tasks,
* a maximum of 200% starts four new tasks before stopping the four tasks to be
* drained, provided that the cluster resources required to do this are available.
@@ -67,7 +67,7 @@ export interface UpdateContainerInstancesStateCommandOutput
 *Any PENDING or RUNNING tasks that do not belong to a service
- * are not affected. You must wait for them to finish or stop them manually.
+ * aren't affected. You must wait for them to finish or stop them manually.
A container instance has completed draining when it has no more RUNNING
* tasks. You can verify this using ListTasks.
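A hedged sketch of draining an instance before maintenance (IDs are placeholders):

```javascript
import { ECSClient, UpdateContainerInstancesStateCommand } from "@aws-sdk/client-ecs";

const client = new ECSClient({ region: "us-east-1" });
await client.send(
  new UpdateContainerInstancesStateCommand({
    cluster: "default",
    containerInstances: [
      "arn:aws:ecs:us-east-1:111122223333:container-instance/default/EXAMPLE",
    ],
    status: "DRAINING",
  })
);
```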
 When a container instance has been drained, you can set a container instance to
 * ACTIVE status and once it has reached that status the Amazon ECS scheduler
 * can begin scheduling tasks on the instance again.
diff --git a/clients/client-ecs/src/commands/UpdateServiceCommand.ts b/clients/client-ecs/src/commands/UpdateServiceCommand.ts
index 964697c89cdcd..abfdbc1d1d838 100644
--- a/clients/client-ecs/src/commands/UpdateServiceCommand.ts
+++ b/clients/client-ecs/src/commands/UpdateServiceCommand.ts
@@ -36,12 +36,12 @@ export interface UpdateServiceCommandOutput extends UpdateServiceResponse, __MetadataBearer {}
 * only the desired count, deployment configuration, task placement constraints and
 * strategies, and health check grace period can be updated using this API. If the network
 * configuration, platform version, or task definition need to be updated, a new CodeDeploy
- * deployment should be created. For more information, see CreateDeployment in the CodeDeploy API Reference.
+ * deployment is created. For more information, see CreateDeployment in the CodeDeploy API Reference.
 *For services using an external deployment controller, you can update only the desired
 * count, task placement constraints and strategies, and health check grace period using
 * this API. If the launch type, load balancer, network configuration, platform version, or
- * task definition need to be updated, you should create a new task set. For more
- * information, see CreateTaskSet.
+ * task definition need to be updated, create a new task set. For more information, see
+ * CreateTaskSet.
 *You can add to or subtract from the number of instantiations of a task definition in a
* service by specifying the cluster that the service is running in and a new
 * desiredCount parameter.
If your updated Docker image uses the same tag as what is in the existing task
- * definition for your service (for example, my_image:latest), you do not
+ * definition for your service (for example, my_image:latest), you don't
 * need to create a new revision of your task definition. You can update the service
 * using the forceNewDeployment option. The new tasks launched by the
* deployment pull the current image/tag combination from your repository when they
@@ -67,15 +67,15 @@ export interface UpdateServiceCommandOutput extends UpdateServiceResponse, __MetadataBearer {}
 * desiredCount temporarily during a deployment. For example, if
 * desiredCount is four tasks, a minimum of 50% allows the
 * scheduler to stop two existing tasks before starting two new tasks. Tasks for
- * services that do not use a load balancer are considered healthy if they are in
- * the RUNNING state. Tasks for services that use a load balancer are
- * considered healthy if they are in the RUNNING state and the
- * container instance they are hosted on is reported as healthy by the load
+ * services that don't use a load balancer are considered healthy if they're in the
+ * RUNNING state. Tasks for services that use a load balancer are
+ * considered healthy if they're in the RUNNING state and the
+ * container instance they're hosted on is reported as healthy by the load
* balancer.
 * The maximumPercent parameter represents an upper limit on the
- * number of running tasks during a deployment, which enables you to define the
+ * number of running tasks during a deployment. You can use it to define the
 * deployment batch size. For example, if desiredCount is four tasks,
* a maximum of 200% starts four new tasks before stopping the four older tasks
* (provided that the cluster resources required to do this are available).
When UpdateService stops a task during a deployment, the equivalent
 * of docker stop is issued to the containers running in the task. This
- * results in a SIGTERM and a 30-second timeout, after which
+ * results in a SIGTERM and a 30-second timeout. After this,
 * SIGKILL is sent and the containers are forcibly stopped. If the
 * container handles the SIGTERM gracefully and exits within 30 seconds from
 * receiving it, no SIGKILL is sent.
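A minimal sketch of the forceNewDeployment flow described above (cluster and service names are assumptions):

```javascript
import { ECSClient, UpdateServiceCommand } from "@aws-sdk/client-ecs";

const client = new ECSClient({ region: "us-east-1" });
// Re-pull my_image:latest without registering a new task definition revision.
await client.send(
  new UpdateServiceCommand({
    cluster: "default",
    service: "my-service",
    desiredCount: 4,
    forceNewDeployment: true,
  })
);
```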
 When the service scheduler launches new tasks, it determines task placement in your
- * cluster with the following logic:
+ * cluster with the following logic.
 *Determine which of the container instances in your cluster can support your
- * service's task definition (for example, they have the required CPU, memory,
- * ports, and container instance attributes).
+ * service's task definition. For example, they have the required CPU, memory,
+ * ports, and container instance attributes.
 *By default, the service scheduler attempts to balance tasks across
- * Availability Zones in this manner (although you can choose a different placement
- * strategy):
+ * Availability Zones in this manner even though you can choose a different
+ * placement strategy.
 *Sort the valid container instances by the fewest number of running
diff --git a/clients/client-ecs/src/endpoints.ts b/clients/client-ecs/src/endpoints.ts
index d009fedbad1a6..d13253fb9061a 100644
--- a/clients/client-ecs/src/endpoints.ts
+++ b/clients/client-ecs/src/endpoints.ts
@@ -155,6 +155,10 @@ const partitionHash: PartitionHash = {
       hostname: "ecs.{region}.c2s.ic.gov",
       tags: [],
     },
+    {
+      hostname: "ecs-fips.{region}.c2s.ic.gov",
+      tags: ["fips"],
+    },
   ],
 },
 "aws-iso-b": {
@@ -165,6 +169,10 @@ const partitionHash: PartitionHash = {
       hostname: "ecs.{region}.sc2s.sgov.gov",
       tags: [],
     },
+    {
+      hostname: "ecs-fips.{region}.sc2s.sgov.gov",
+      tags: ["fips"],
+    },
   ],
 },
 "aws-us-gov": {
diff --git a/clients/client-ecs/src/models/models_0.ts b/clients/client-ecs/src/models/models_0.ts
index bb05d001748a7..156a5d1b5473d 100644
--- a/clients/client-ecs/src/models/models_0.ts
+++ b/clients/client-ecs/src/models/models_0.ts
@@ -2,7 +2,7 @@ import { SENSITIVE_STRING } from "@aws-sdk/smithy-client";
import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException } from "@aws-sdk/types";
/**
- *You do not have authorization to perform the requested action.
+ *You don't have authorization to perform the requested action.
 */
export interface AccessDeniedException extends __SmithyException, $MetadataBearer {
  name: "AccessDeniedException";
@@ -29,9 +29,9 @@ export enum AgentUpdateStatus {
}
/**
- *These errors are usually caused by a client action, such as using an action or
- * resource on behalf of a user that doesn't have permissions to use the action or
- * resource, or specifying an identifier that is not valid.
+ *These errors are usually caused by a client action. This client action might be using
+ * an action or resource on behalf of a user that doesn't have permissions to use the
+ * action or resource. Or, it might be specifying an identifier that isn't valid.
 */
export interface ClientException extends __SmithyException, $MetadataBearer {
  name: "ClientException";
@@ -56,34 +56,34 @@ export enum ManagedScalingStatus {
/**
 *The managed scaling settings for the Auto Scaling group capacity provider.
*When managed scaling is enabled, Amazon ECS manages the scale-in and scale-out actions of
- * the Auto Scaling group. Amazon ECS manages a target tracking scaling policy using an
- * Amazon ECS-managed CloudWatch metric with the specified targetCapacity value as the
- * target value for the metric. For more information, see Using Managed Scaling in the Amazon Elastic Container Service Developer Guide.
targetCapacity
value as the target
+ * value for the metric. For more information, see Using Managed Scaling in the Amazon Elastic Container Service Developer Guide.
* If managed scaling is disabled, the user must manage the scaling of the Auto Scaling * group.
*/ export interface ManagedScaling { /** - *Whether or not to enable managed scaling for the capacity provider.
+ *Determines whether to enable managed scaling for the capacity provider.
 */
status?: ManagedScalingStatus | string;

/**
 *The target capacity value for the capacity provider. The specified value must be
 * greater than 0 and less than or equal to 100. A value of
- * 100 will result in the Amazon EC2 instances in your Auto Scaling group being
- * completely utilized.
+ * 100 results in the Amazon EC2 instances in your Auto Scaling group being
+ * completely used.
*/
targetCapacity?: number;
/**
- * The minimum number of container instances that Amazon ECS will scale in or scale out at one
+ * The minimum number of container instances that Amazon ECS scales in or scales out at one
 * time. If this parameter is omitted, the default value of 1 is used.
 */
minimumScalingStepSize?: number;

/**
- * The maximum number of container instances that Amazon ECS will scale in or scale out at one
+ * The maximum number of container instances that Amazon ECS scales in or scales out at one
 * time. If this parameter is omitted, the default value of 10000 is
 * used.
 When using managed termination protection, managed scaling must also be used
- * otherwise managed termination protection will not work.
+ * otherwise managed termination protection doesn't work.
 *When managed termination protection is enabled, Amazon ECS prevents the Amazon EC2 instances in
 * an Auto Scaling group that contain tasks from being terminated during a scale-in action.
 * The Auto Scaling group and each instance in the Auto Scaling group must have instance
 * protection from scale-in actions enabled as well. For more information, see Instance Protection in the Auto Scaling User Guide.
- *When managed termination protection is disabled, your Amazon EC2 instances are not
- * protected from termination when the Auto Scaling group scales in.
+ *When managed termination protection is disabled, your Amazon EC2 instances aren't protected
+ * from termination when the Auto Scaling group scales in.
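A hedged sketch of creating a capacity provider with these managed settings (the name and Auto Scaling group ARN are placeholders):

```javascript
import { ECSClient, CreateCapacityProviderCommand } from "@aws-sdk/client-ecs";

const client = new ECSClient({ region: "us-east-1" });
await client.send(
  new CreateCapacityProviderCommand({
    name: "my-capacity-provider",
    autoScalingGroupProvider: {
      autoScalingGroupArn:
        "arn:aws:autoscaling:us-east-1:111122223333:autoScalingGroup:uuid:autoScalingGroupName/my-asg",
      managedScaling: {
        status: "ENABLED",
        targetCapacity: 90,
        minimumScalingStepSize: 1,
        maximumScalingStepSize: 100,
      },
      managedTerminationProtection: "ENABLED",
    },
  })
);
```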
 */
managedTerminationProtection?: ManagedTerminationProtection | string;
}
@@ -154,7 +154,7 @@ export namespace AutoScalingGroupProvider {
/**
 *The metadata that you apply to a resource to help you categorize and organize them.
- * Each tag consists of a key and an optional value, both of which you define.
+ * Each tag consists of a key and an optional value. You define them.
 *The following basic restrictions apply to tags:
- * The name of the capacity provider. Up to 255 characters are allowed, including letters
- * (upper and lowercase), numbers, underscores, and hyphens. The name cannot be prefixed
- * with "aws", "ecs", or "fargate".
+ * The name of the capacity provider. Up to 255 characters are allowed. They include
+ * letters (both upper and lowercase letters), numbers, underscores (_), and hyphens (-).
+ * The name can't be prefixed with "aws", "ecs", or
+ * "fargate".
- *The metadata that you apply to the capacity provider to help you categorize and
- * organize them. Each tag consists of a key and an optional value, both of which you
- * define.
- *The following basic restrictions apply to tags:
+ *The metadata that you apply to the capacity provider to categorize and organize them
+ * more conveniently. Each tag consists of a key and an optional value. You define both of
+ * them.
+ *The following basic restrictions apply to tags:
 *Maximum number of tags per resource - 50
+ *The details for a capacity provider.
*/ export interface CapacityProvider { /** @@ -302,7 +303,7 @@ export interface CapacityProvider { /** *The current status of the capacity provider. Only capacity providers in an
* ACTIVE
state can be used in a cluster. When a capacity provider is
- * successfully deleted, it will have an INACTIVE
status.
INACTIVE
status.
*/
status?: CapacityProviderStatus | string;
@@ -313,7 +314,7 @@ export interface CapacityProvider {
/**
* The update status of the capacity provider. The following are the possible states that - * will be returned.
+ * is returned. *The capacity provider has been successfully deleted and will have an + *
The capacity provider was successfully deleted and has an
* INACTIVE
status.
The capacity provider was unable to be deleted. The update status reason - * will provide further details about why the delete failed.
+ *The capacity provider can't be deleted. The update status reason provides + * further details about why the delete failed.
*The metadata that you apply to the capacity provider to help you categorize and - * organize it. Each tag consists of a key and an optional value, both of which you - * define.
- *The following basic restrictions apply to tags:
+ * organize it. Each tag consists of a key and an optional value. You define both. + *The following basic restrictions apply to tags:
*Maximum number of tags per resource - 50
@@ -404,7 +404,7 @@ export namespace CreateCapacityProviderResponse {
}
/**
- *The specified parameter is invalid. Review the available parameters for the API
+ *The specified parameter isn't valid. Review the available parameters for the API
 * request.
 */
export interface InvalidParameterException extends __SmithyException, $MetadataBearer {
@@ -423,7 +423,7 @@ export namespace InvalidParameterException {
}
/**
- *The limit for the resource has been exceeded.
+ *The limit for the resource was exceeded.
 */
export interface LimitExceededException extends __SmithyException, $MetadataBearer {
  name: "LimitExceededException";
@@ -459,8 +459,8 @@ export namespace ServerException {
}
/**
- *There is already a current Amazon ECS container agent update in progress on the specified
- * container instance. If the container agent becomes disconnected while it is in a
+ *There's already a current Amazon ECS container agent update in progress on the container
+ * instance that's specified. If the container agent becomes disconnected while it's in a
 * transitional stage, such as PENDING or STAGING, the update
 * process can get stuck in that state. However, when the agent reconnects, it resumes
 * where it stopped previously.
- *Whether or not to enable encryption on the CloudWatch logs. If not specified,
- * encryption will be disabled.
+ *Determines whether to enable encryption on the CloudWatch logs. If not specified,
+ * encryption will be disabled.
 */
cloudWatchEncryptionEnabled?: boolean;
@@ -508,8 +508,8 @@ export interface ExecuteCommandLogConfiguration {
s3BucketName?: string;
/**
- *Whether or not to use encryption on the S3 logs. If not specified,
- * encryption is not used.
+ *Determines whether to use encryption on the S3 logs. If not specified, encryption is
+ * not used.
 */
s3EncryptionEnabled?: boolean;
@@ -620,7 +620,7 @@ export namespace ClusterConfiguration {
 *FARGATE_SPOT capacity providers. The Fargate capacity providers are
* available to all accounts and only need to be associated with a cluster to be used in a
* capacity provider strategy.
- * A capacity provider strategy may contain a maximum of 6 capacity providers.
+ *A capacity provider strategy may contain a maximum of 6 capacity providers.
 */
export interface CapacityProviderStrategyItem {
/**
@@ -636,16 +636,16 @@ export interface CapacityProviderStrategyItem {
 *If no weight value is specified, the default value of 0 is
* used. When multiple capacity providers are specified within a capacity provider
* strategy, at least one of the capacity providers must have a weight value greater than
- * zero and any capacity providers with a weight of 0 will not be used to
- * place tasks. If you specify multiple capacity providers in a strategy that all have a
- * weight of 0, any RunTask or CreateService actions
- * using the capacity provider strategy will fail.
0
can't be used to place
+ * tasks. If you specify multiple capacity providers in a strategy that all have a weight
+ * of 0
, any RunTask
or CreateService
actions using
+ * the capacity provider strategy will fail.
 * An example scenario for using weights is defining a strategy that contains two
 * capacity providers and both have a weight of 1, then when the
 * base is satisfied, the tasks will be split evenly across the two
 * capacity providers. Using that same logic, if you specify a weight of 1 for
 * capacityProviderA and a weight of 4 for
- * capacityProviderB, then for every one task that is run using
+ * capacityProviderB, then for every one task that's run using
* capacityProviderA, four tasks would use
* capacityProviderB.
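For illustration, a strategy object expressing that weighting (the provider names are the hypothetical ones from the scenario above):

```javascript
// Hypothetical strategy: one base task on capacityProviderA, then a 1:4 split.
const capacityProviderStrategy = [
  { capacityProvider: "capacityProviderA", base: 1, weight: 1 },
  { capacityProvider: "capacityProviderB", weight: 4 },
];
```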
- *The name of your cluster. If you do not specify a name for your cluster, you create a
- * cluster named default. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.
+ *The name of your cluster. If you don't specify a name for your cluster, you create a
+ * cluster that's named default. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.
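A hedged sketch of creating a cluster (the cluster name and setting values are illustrative):

```javascript
import { ECSClient, CreateClusterCommand } from "@aws-sdk/client-ecs";

const client = new ECSClient({ region: "us-east-1" });
// Create a cluster with CloudWatch Container Insights enabled.
await client.send(
  new CreateClusterCommand({
    clusterName: "my-cluster",
    settings: [{ name: "containerInsights", value: "enabled" }],
  })
);
```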
The metadata that you apply to the cluster to help you categorize and organize them. - * Each tag consists of a key and an optional value, both of which you define.
+ * Each tag consists of a key and an optional value. You define both. *The following basic restrictions apply to tags:
*The setting to use when creating a cluster. This parameter is used to enable CloudWatch
- * Container Insights for a cluster. If this value is specified, it will override the
+ * Container Insights for a cluster. If this value is specified, it overrides the
* containerInsights
value set with PutAccountSetting or
* PutAccountSettingDefault.
The execute command configuration for the cluster.
+ *The + * execute command configuration for the cluster.
*/ configuration?: ClusterConfiguration; @@ -768,8 +769,9 @@ export interface CreateClusterRequest { * strategy when calling the CreateService or RunTask * actions. *If specifying a capacity provider that uses an Auto Scaling group, the capacity - * provider must already be created and not already associated with another cluster. New - * Auto Scaling group capacity providers can be created with the CreateCapacityProvider API operation.
+ * provider must be created but not associated with another cluster. New Auto Scaling group + * capacity providers can be created with the CreateCapacityProvider API + * operation. *To use a Fargate capacity provider, specify either the FARGATE
or
* FARGATE_SPOT
capacity providers. The Fargate capacity providers are
* available to all accounts and only need to be associated with a cluster to be
@@ -780,13 +782,13 @@ export interface CreateClusterRequest {
capacityProviders?: string[];
/**
- *
The capacity provider strategy to set as the default for the cluster. When a default - * capacity provider strategy is set for a cluster, when calling the RunTask or CreateService APIs with no capacity + *
The capacity provider strategy to set as the default for the cluster. After a default + * capacity provider strategy is set for a cluster, when you call the RunTask or CreateService APIs with no capacity * provider strategy or launch type specified, the default capacity provider strategy for * the cluster is used.
- *If a default capacity provider strategy is not defined for a cluster during creation, - * it can be defined later with the PutClusterCapacityProviders API - * operation.
+ *If a default capacity provider strategy isn't defined for a cluster when it was + * created, it can be defined later with the PutClusterCapacityProviders + * API operation.
*/ defaultCapacityProviderStrategy?: CapacityProviderStrategyItem[]; } @@ -864,7 +866,7 @@ export namespace Attachment { } /** - *A regional grouping of one or more container instances on which you can run task + *
A regional grouping of one or more container instances where you can run task * requests. Each account receives a default cluster the first time you use the Amazon ECS * service, but you may also create other clusters. Clusters may contain more than one * instance type simultaneously.
@@ -886,7 +888,7 @@ export interface Cluster { configuration?: ClusterConfiguration; /** - *The status of the cluster. The following are the possible states that will be + *
The status of the cluster. The following are the possible states that are * returned.
*The cluster has capacity providers associated with it and the resources - * needed for the capacity provider are being created.
+ *The cluster has capacity providers that are associated with it and the + * resources needed for the capacity provider are being created.
*The cluster has capacity providers associated with it and the resources - * needed for the capacity provider are being deleted.
+ *The cluster has capacity providers that are associated with it and the + * resources needed for the capacity provider are being deleted.
*The cluster has capacity providers associated with it and the resources - * needed for the capacity provider have failed to create.
+ *The cluster has capacity providers that are associated with it and the + * resources needed for the capacity provider have failed to create.
*The cluster has been deleted. Clusters with an INACTIVE
* status may remain discoverable in your account for a period of time.
- * However, this behavior is subject to change in the future, so you should not
- * rely on INACTIVE
clusters persisting.
INACTIVE
clusters persisting.
* Additional information about your clusters that are separated by launch type, - * including:
+ *Additional information about your clusters that are separated by launch type. They + * include the following:
*runningEC2TasksCount
@@ -976,7 +978,7 @@ export interface Cluster { /** *The metadata that you apply to the cluster to help you categorize and organize them. - * Each tag consists of a key and an optional value, both of which you define.
+ * Each tag consists of a key and an optional value. You define both. *The following basic restrictions apply to tags:
*The resources attached to a cluster. When using a capacity provider with a cluster, - * the Auto Scaling plan that is created will be returned as a cluster attachment.
+ * the Auto Scaling plan that's created is returned as a cluster attachment. */ attachments?: Attachment[]; /** *The status of the capacity providers associated with the cluster. The following are - * the states that will be returned:
+ * the states that are returned. *The specified cluster could not be found. You can view your available clusters with - * ListClusters. Amazon ECS clusters are Region-specific.
+ *The specified cluster wasn't found. You can view your available clusters with ListClusters. Amazon ECS clusters are Region specific.
*/ export interface ClusterNotFoundException extends __SmithyException, $MetadataBearer { name: "ClusterNotFoundException"; @@ -1104,7 +1105,7 @@ export namespace ClusterNotFoundException { /** *The deployment circuit breaker can only be used for services using the rolling
- * update (ECS
) deployment type that are not behind a Classic Load Balancer.
ECS
) deployment type that aren't behind a Classic Load Balancer.
* The deployment circuit breaker determines whether a * service deployment will fail if the service can't reach a steady state. If enabled, a @@ -1115,14 +1116,15 @@ export namespace ClusterNotFoundException { */ export interface DeploymentCircuitBreaker { /** - *
Whether to enable the deployment circuit breaker logic for the service.
+ *Determines whether to enable the deployment circuit breaker logic for the + * service.
*/ enable: boolean | undefined; /** - *Whether to enable Amazon ECS to roll back the service if a service deployment fails. If - * rollback is enabled, when a service deployment fails, the service is rolled back to the - * last deployment that completed successfully.
+ *Determines whether to enable Amazon ECS to roll back the service if a service deployment + * fails. If rollback is enabled, when a service deployment fails, the service is rolled + * back to the last deployment that completed successfully.
 */
rollback: boolean | undefined;
}
@@ -1188,10 +1190,10 @@ export interface DeploymentConfiguration {
 * if your service has a desired number of four tasks and a minimum healthy percent of 50%,
 * the scheduler may stop two existing tasks to free up cluster capacity before starting
 * two new tasks. Tasks for services that do not use a load balancer
- * are considered healthy if they are in the RUNNING
 state; tasks for services
- * that do use a load balancer are considered healthy if they are in
- * the RUNNING state and they are reported as healthy by the load balancer.
- * The default value for minimum healthy percent is 100%.
+ * are considered healthy if they're in the RUNNING state; tasks for services
+ * that do use a load balancer are considered healthy if they're in
+ * the RUNNING state and they're reported as healthy by the load balancer. The
+ * default value for minimum healthy percent is 100%.
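A hedged sketch combining the circuit breaker and the batch-size limits discussed above (names and percentages are illustrative):

```javascript
import { ECSClient, UpdateServiceCommand } from "@aws-sdk/client-ecs";

const client = new ECSClient({ region: "us-east-1" });
await client.send(
  new UpdateServiceCommand({
    cluster: "default",
    service: "my-service",
    deploymentConfiguration: {
      deploymentCircuitBreaker: { enable: true, rollback: true },
      minimumHealthyPercent: 50,
      maximumPercent: 200,
    },
  })
);
```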
* If a service is using the blue/green (CODE_DEPLOY
) or
* EXTERNAL
deployment types and tasks that use the EC2
* launch type, the minimum healthy percent value is set
@@ -1276,21 +1278,21 @@ export interface LoadBalancer {
/**
*
 *The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or
 * task set.
- *A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. If you are using a
- * Classic Load Balancer the target group ARN should be omitted.
+ *A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. If you're using a
+ * Classic Load Balancer, omit the target group ARN.
 *For services using the ECS deployment controller, you can specify one or
* multiple target groups. For more information, see Registering Multiple Target Groups with a Service in
* the Amazon Elastic Container Service Developer Guide.
- *For services using the CODE_DEPLOY deployment controller, you are
- * required to define two target groups for the load balancer. For more information, see
- * Blue/Green Deployment with CodeDeploy in the
+ *For services using the CODE_DEPLOY deployment controller, you're required
+ * to define two target groups for the load balancer. For more information, see Blue/Green Deployment with CodeDeploy in the
 * Amazon Elastic Container Service Developer Guide.
- *If your service's task definition uses the awsvpc network mode (which
- * is required for the Fargate launch type), you must choose
- * ip as the target type, not instance, when creating
- * your target groups because tasks that use the awsvpc network mode are
- * associated with an elastic network interface, not an Amazon EC2 instance.
+ *If your service's task definition uses the awsvpc network mode, you
+ * must choose ip as the target type, not instance. Do this
+ * when creating your target groups because tasks that use the awsvpc
+ * network mode are associated with an elastic network interface, not an Amazon EC2
+ * instance. This network mode is required for the Fargate launch
+ * type.
 *The port on the container to associate with the load balancer. This port must
 * correspond to a containerPort in the task definition the tasks in the
 * service are using. For tasks that use the EC2 launch type, the container
- * instance they are launched on must allow ingress traffic on the hostPort of
+ * instance they're launched on must allow ingress traffic on the hostPort of
 * the port mapping.
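A hedged sketch of the corresponding LoadBalancer object for a service behind an Application Load Balancer (the ARN, names, and port are placeholders):

```javascript
const loadBalancers = [
  {
    // Target group ARN is only set for ALB/NLB; omit it for a Classic Load Balancer.
    targetGroupArn:
      "arn:aws:elasticloadbalancing:us-east-1:111122223333:targetgroup/my-tg/0123456789abcdef",
    containerName: "web", // as it appears in the container definition
    containerPort: 80, // must match a containerPort in the task definition
  },
];
```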
- *The IDs of the subnets associated with the task or service. There is a limit of 16
+ *The IDs of the subnets associated with the task or service. There's a limit of 16
 * subnets that can be specified per AwsVpcConfiguration.
- *The IDs of the security groups associated with the task or service. If you do not
- * specify a security group, the default security group for the VPC is used. There is a
+ *The IDs of the security groups associated with the task or service. If you don't
+ * specify a security group, the default security group for the VPC is used. There's a
 * limit of 5 security groups that can be specified per
 * AwsVpcConfiguration.
- *The VPC subnets and security groups associated with a task.
+ *The VPC subnets and security groups that are associated with a task.
*All specified subnets and security groups must be from the same VPC.
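For illustration, a hypothetical awsvpc configuration object (the IDs are placeholders and everything must belong to the same VPC):

```javascript
// Up to 16 subnets and 5 security groups may be specified.
const networkConfiguration = {
  awsvpcConfiguration: {
    subnets: ["subnet-0123456789abcdef0"],
    securityGroups: ["sg-0123456789abcdef0"],
    assignPublicIp: "DISABLED",
  },
};
```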
- *If you are using the Fargate launch type, task placement constraints
- * are not supported.
+ *If you're using the Fargate launch type, task placement constraints
+ * aren't supported.
*spread
placement strategy spreads
* placement across available candidates evenly based on the field
parameter.
* The binpack
strategy places tasks on available candidates that have the
- * least available amount of the resource that is specified with the field
+ * least available amount of the resource that's specified with the field
* parameter. For example, if you binpack on memory, a task is placed on the instance with
- * the least amount of remaining memory (but still enough to run the task).
+ * the least amount of remaining memory but still enough to run the task.
*/
type?: PlacementStrategyType | string;
/**
 * The field to apply the placement strategy against. For the spread
 * placement strategy, valid values are instanceId (or host,
- * which has the same effect), or any platform or custom attribute that is applied to a
+ * which has the same effect), or any platform or custom attribute that's applied to a
 * container instance, such as attribute:ecs.availability-zone. For the
 * binpack placement strategy, valid values are cpu and
 * memory. For the random placement strategy, this field is
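For illustration, a hypothetical placement strategy using those fields:

```javascript
// Spread across Availability Zones first, then binpack on memory within each zone.
const placementStrategy = [
  { type: "spread", field: "attribute:ecs.availability-zone" },
  { type: "binpack", field: "memory" },
];
```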
@@ -1491,7 +1493,7 @@ export enum SchedulingStrategy {
}
/**
- *
Details of the service registry.
+ *The details for the service registry.
*/ export interface ServiceRegistry { /** @@ -1502,32 +1504,32 @@ export interface ServiceRegistry { /** *The port value used if your service discovery service specified an SRV record. This
- * field may be used if both the awsvpc
network mode and SRV records are
+ * field might be used if both the awsvpc
network mode and SRV records are
* used.
The container name value, already specified in the task definition, to be used for
- * your service discovery service. If the task definition that your service task specifies
- * uses the bridge
or host
network mode, you must specify a
- * containerName
and containerPort
combination from the task
- * definition. If the task definition that your service task specifies uses the
+ *
The container name value to be used for your service discovery service. It's already
+ * specified in the task definition. If the task definition that your service task
+ * specifies uses the bridge
or host
network mode, you must
+ * specify a containerName
and containerPort
combination from the
+ * task definition. If the task definition that your service task specifies uses the
* awsvpc
network mode and a type SRV DNS record is used, you must specify
* either a containerName
and containerPort
combination or a
- * port
value, but not both.
port
value. However, you can't specify both.
*/
containerName?: string;
/**
- * The port value, already specified in the task definition, to be used for your service
- * discovery service. If the task definition your service task specifies uses the
+ * The port value to be used for your service discovery service. It's already specified
+ * in the task definition. If the task definition your service task specifies uses the
 * bridge or host network mode, you must specify a
 * containerName and containerPort combination from the task
 * definition. If the task definition your service task specifies uses the
 * awsvpc network mode and a type SRV DNS record is used, you must specify
 * either a containerName and containerPort combination or a
- * port value, but not both.
+ * port value. However, you can't specify both.
*/
containerPort?: number;
}
@@ -1543,7 +1545,7 @@ export namespace ServiceRegistry {
export interface CreateServiceRequest {
/**
- * The short name or full Amazon Resource Name (ARN) of the cluster on which to run your service.
+ * The short name or full Amazon Resource Name (ARN) of the cluster that you run your service on.
 * If you do not specify a cluster, the default cluster is assumed.
*/ cluster?: string; @@ -1557,10 +1559,10 @@ export interface CreateServiceRequest { /** *The family
and revision
(family:revision
) or
- * full ARN of the task definition to run in your service. If a revision
is
- * not specified, the latest ACTIVE
revision is used.
A task definition must be specified if the service is using either the
- * ECS
or CODE_DEPLOY
deployment controllers.
revision
+ * isn't specified, the latest ACTIVE
revision is used.
+ * A task definition must be specified if the service uses either the ECS
or
+ * CODE_DEPLOY
deployment controllers.
A load balancer object representing the load balancers to use with your service. For * more information, see Service Load Balancing in the * Amazon Elastic Container Service Developer Guide.
- *If the service is using the rolling update (ECS
) deployment controller
- * and using either an Application Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to
- * attach to the service. The service-linked role is required for services that make use of
- * multiple target groups. For more information, see Using service-linked roles for Amazon ECS in the
+ *
If the service uses the rolling update (ECS
) deployment controller and
+ * using either an Application Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to attach
+ * to the service. The service-linked role is required for services that use multiple
+ * target groups. For more information, see Using service-linked roles for Amazon ECS in the
* Amazon Elastic Container Service Developer Guide.
If the service is using the CODE_DEPLOY
deployment controller, the
- * service is required to use either an Application Load Balancer or Network Load Balancer. When creating an CodeDeploy deployment
- * group, you specify two target groups (referred to as a targetGroupPair
).
- * During a deployment, CodeDeploy determines which task set in your service has the status
- * PRIMARY
and associates one target group with it, and then associates
- * the other target group with the replacement task set. The load balancer can also have up
- * to two listeners: a required listener for production traffic and an optional listener
- * that allows you perform validation tests with Lambda functions before routing production
- * traffic to it.
If the service uses the CODE_DEPLOY
deployment controller, the service is
+ * required to use either an Application Load Balancer or Network Load Balancer. When creating an CodeDeploy deployment group, you
+ * specify two target groups (referred to as a targetGroupPair
). During a
+ * deployment, CodeDeploy determines which task set in your service has the status
+ * PRIMARY
, and it associates one target group with it. Then, it also
+ * associates the other target group with the replacement task set. The load balancer can
+ * also have up to two listeners: a required listener for production traffic and an
+ * optional listener that you can use to perform validation tests with Lambda functions
+ * before routing production traffic to it.
After you create a service using the ECS
deployment controller, the load
- * balancer name or target group ARN, container name, and container port specified in the
- * service definition are immutable. If you are using the CODE_DEPLOY
+ * balancer name or target group ARN, container name, and container port that's specified
+ * in the service definition are immutable. If you use the CODE_DEPLOY
* deployment controller, these values can be changed when updating the service.
For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN, - * the container name (as it appears in a container definition), and the container port to - * access from the load balancer. The load balancer name parameter must be omitted. When a - * task from this service is placed on a container instance, the container instance and - * port combination is registered as a target in the target group specified here.
- *For Classic Load Balancers, this object must contain the load balancer name, the container name (as it - * appears in a container definition), and the container port to access from the load - * balancer. The target group ARN parameter must be omitted. When a task from this service - * is placed on a container instance, the container instance is registered with the load - * balancer specified here.
+ * the container name, and the container port to access from the load balancer. The + * container name must be as it appears in a container definition. The load balancer name + * parameter must be omitted. When a task from this service is placed on a container + * instance, the container instance and port combination is registered as a target in the + * target group that's specified here. + *For Classic Load Balancers, this object must contain the load balancer name, the container name , and + * the container port to access from the load balancer. The container name must be as it + * appears in a container definition. The target group ARN parameter must be omitted. When + * a task from this service is placed on a container instance, the container instance is + * registered with the load balancer that's specified here.
*Services with tasks that use the awsvpc
network mode (for example, those
- * with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are
- * not supported. Also, when you create any target groups for these services, you must
- * choose ip
as the target type, not instance
, because tasks that
- * use the awsvpc
network mode are associated with an elastic network
- * interface, not an Amazon EC2 instance.
ip
as the target type, not instance
. This is because
+ * tasks that use the awsvpc
network mode are associated with an elastic
+ * network interface, not an Amazon EC2 instance.
*/
loadBalancers?: LoadBalancer[];
@@ -1611,7 +1614,7 @@ export interface CreateServiceRequest {
* discovery.
* Each service may be associated with one service registry. Multiple service - * registries per service isn't supported.
+ * registries for each service isn't supported. *The number of instantiations of the specified task definition to place and keep * running on your cluster.
- *This is required if schedulingStrategy
is REPLICA
or is not
- * specified. If schedulingStrategy
is DAEMON
then this is not
+ *
This is required if schedulingStrategy
is REPLICA
or isn't
+ * specified. If schedulingStrategy
is DAEMON
then this isn't
* required.
Unique, case-sensitive identifier that you provide to ensure the idempotency of the - * request. Up to 32 ASCII characters are allowed.
+ *An identifier that you provide to ensure the idempotency of the request. It must be + * unique and is case sensitive. Up to 32 ASCII characters are allowed.
*/ clientToken?: string; /** - *The infrastructure on which to run your service. For more information, see Amazon ECS
+ * The infrastructure that you run your service on. For more information, see Amazon ECS
* launch types in the Amazon Elastic Container Service Developer Guide. The FARGATE
launch type runs your tasks on Fargate On-Demand
* infrastructure.
The EC2
launch type runs your tasks on Amazon EC2 instances registered to your
* cluster.
The EXTERNAL
launch type runs your tasks on your on-premise server or
+ *
The EXTERNAL
launch type runs your tasks on your on-premises server or
* virtual machine (VM) capacity registered to your cluster.
A service can use either a launch type or a capacity provider strategy. If a
* launchType
is specified, the capacityProviderStrategy
@@ -1664,8 +1667,8 @@ export interface CreateServiceRequest {
/**
*
The platform version that your tasks in the service are running on. A platform version
* is specified only for tasks using the Fargate launch type. If one isn't
- * specified, the LATEST
platform version is used by default. For more
- * information, see Fargate platform
+ * specified, the LATEST
platform version is used. For more information, see
+ * Fargate platform
* versions in the Amazon Elastic Container Service Developer Guide.
The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your
* load balancer on your behalf. This parameter is only permitted if you are using a load
- * balancer with your service and your task definition does not use the awsvpc
+ * balancer with your service and your task definition doesn't use the awsvpc
* network mode. If you specify the role
parameter, you must also specify a
* load balancer object with the loadBalancers
parameter.
If your account has already created the Amazon ECS service-linked role, that role is
- * used by default for your service unless you specify a role here. The service-linked
- * role is required if your task definition uses the awsvpc
network mode
- * or if the service is configured to use service discovery, an external deployment
- * controller, multiple target groups, or Elastic Inference accelerators in which case
- * you should not specify a role here. For more information, see Using
+ * used for your service unless you specify a role here. The service-linked role is
+ * required if your task definition uses the awsvpc
network mode or if the
+ * service is configured to use service discovery, an external deployment controller,
+ * multiple target groups, or Elastic Inference accelerators in which case you don't
+ * specify a role here. For more information, see Using
* service-linked roles for Amazon ECS in the
* Amazon Elastic Container Service Developer Guide.
An array of placement constraint objects to use for tasks in your service. You can - * specify a maximum of 10 constraints per task (this limit includes constraints in the - * task definition and those specified at runtime).
+ * specify a maximum of 10 constraints for each task. This limit includes constraints in + * the task definition and those specified at runtime. */ placementConstraints?: PlacementConstraint[]; /** *The placement strategy objects to use for tasks in your service. You can specify a - * maximum of 5 strategy rules per service.
+ * maximum of 5 strategy rules for each service. */ placementStrategy?: PlacementStrategy[]; /** *The network configuration for the service. This parameter is required for task
* definitions that use the awsvpc
network mode to receive their own elastic
- * network interface, and it is not supported for other network modes. For more
- * information, see Task networking
+ * network interface, and it isn't supported for other network modes. For more information,
+ * see Task networking
* in the Amazon Elastic Container Service Developer Guide.
The period of time, in seconds, that the Amazon ECS service scheduler should ignore
- * unhealthy Elastic Load Balancing target health checks after a task has first started. This is only used
- * when your service is configured to use a load balancer. If your service has a load
- * balancer defined and you don't specify a health check grace period value, the default
- * value of 0
is used.
The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy
+ * Elastic Load Balancing target health checks after a task has first started. This is only used when your
+ * service is configured to use a load balancer. If your service has a load balancer
+ * defined and you don't specify a health check grace period value, the default value of
+ * 0
is used.
If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you - * can specify a health check grace period of up to 2,147,483,647 seconds. During that - * time, the Amazon ECS service scheduler ignores health check status. This grace period can - * prevent the service scheduler from marking tasks as unhealthy and stopping them before - * they have time to come up.
+ * can specify a health check grace period of up to + * 2,147,483,647 + * seconds (about 69 years). During that time, the Amazon ECS service + * scheduler ignores health check status. This grace period can prevent the service + * scheduler from marking tasks as unhealthy and stopping them before they have time to + * come up. */ healthCheckGracePeriodSeconds?: number; @@ -1746,9 +1751,8 @@ export interface CreateServiceRequest { * maintains the desired number of tasks across your cluster. By default, the * service scheduler spreads tasks across Availability Zones. You can use task * placement strategies and constraints to customize task placement decisions. This - * scheduler strategy is required if the service is using the - *CODE_DEPLOY
or EXTERNAL
deployment controller
- * types.
+ * scheduler strategy is required if the service uses the CODE_DEPLOY
+ * or EXTERNAL
deployment controller types.
* @@ -1756,7 +1760,7 @@ export interface CreateServiceRequest { * task on each active container instance that meets all of the task placement * constraints that you specify in your cluster. The service scheduler also * evaluates the task placement constraints for running tasks and will stop tasks - * that do not meet the placement constraints. When you're using this strategy, you + * that don't meet the placement constraints. When you're using this strategy, you * don't need to specify a desired number of tasks, a task placement strategy, or * use Service Auto Scaling policies.
*The metadata that you apply to the service to help you categorize and organize them. * Each tag consists of a key and an optional value, both of which you define. When a * service is deleted, the tags are deleted as well.
- *The following basic restrictions apply to tags:
+ *The following basic restrictions apply to tags:
*Maximum number of tags per resource - 50
@@ -1822,15 +1826,15 @@ export interface CreateServiceRequest { /** *Specifies whether to propagate the tags from the task definition or the service to the - * tasks in the service. If no value is specified, the tags are not propagated. Tags can + * tasks in the service. If no value is specified, the tags aren't propagated. Tags can * only be propagated to the tasks within the service during service creation. To add tags - * to a task after service creation or task creation, use the TagResource API - * action.
+ * to a task after service creation or task creation, use the TagResource + * API action. */ propagateTags?: PropagateTags | string; /** - *Whether or not the execute command functionality is enabled for the service. If + *
Determines whether the execute command functionality is enabled for the service. If
* true
, this enables execute command functionality on all containers in
* the service tasks.
The status of the deployment. The following describes each state:
+ *The status of the deployment. The following describes each state.
*The Unix timestamp for when the service deployment was created.
+ *The Unix timestamp for the time when the service deployment was created.
*/ createdAt?: Date; /** - *The Unix timestamp for when the service deployment was last updated.
+ *The Unix timestamp for the time when the service deployment was last updated.
*/ updatedAt?: Date; @@ -1940,17 +1944,19 @@ export interface Deployment { launchType?: LaunchType | string; /** - *The platform version on which your tasks in the service are running. A platform
- * version is only specified for tasks using the Fargate launch type. If one
- * is not specified, the LATEST
platform version is used by default. For more
- * information, see Fargate Platform
- * Versions in the Amazon Elastic Container Service Developer Guide.
The platform version that your tasks in the service run on. A platform version is only
+ * specified for tasks using the Fargate launch type. If one isn't specified,
+ * the LATEST
platform version is used. For more information, see Fargate Platform Versions in the
+ * Amazon Elastic Container Service Developer Guide.
The operating system that your tasks in the service, or tasks are running on. A platform family is specified only for tasks using the Fargate launch type.
- * All tasks that run as part of this service must use the same platformFamily
value as the service, for example, LINUX.
.
The operating system that your tasks in the service run on. A + * platform family is specified only for tasks using the Fargate launch type.
+ * All tasks that run as part of this service must use the same
+ * platformFamily
value as the service, for example,
+ * LINUX.
The rolloutState
of a service is only returned for services that use
- * the rolling update (ECS
) deployment type that are not behind a
+ * the rolling update (ECS
) deployment type that aren't behind a
* Classic Load Balancer.
The rollout state of the deployment. When a service deployment is started, it begins
* in an IN_PROGRESS
state. When the service reaches a steady state, the
- * deployment will transition to a COMPLETED
state. If the service fails to
- * reach a steady state and circuit breaker is enabled, the deployment will transition to a
- * FAILED
state. A deployment in FAILED
state will launch no
- * new tasks. For more information, see DeploymentCircuitBreaker.
COMPLETED
state. If the service fails to reach
+ * a steady state and circuit breaker is enabled, the deployment transitions to a
+ * FAILED
state. A deployment in FAILED
state doesn't launch
+ * any new tasks. For more information, see DeploymentCircuitBreaker.
*/
rolloutState?: DeploymentRolloutState | string;
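A short sketch of reading the rollout state during a rolling update; the cluster and service names are hypothetical:

import { ECSClient, DescribeServicesCommand } from "@aws-sdk/client-ecs";

const ecs = new ECSClient({});
const { services } = await ecs.send(
  new DescribeServicesCommand({ cluster: "demo-cluster", services: ["demo-service"] })
);
// rolloutState is only populated for the rolling update (ECS) deployment type.
for (const deployment of services?.[0]?.deployments ?? []) {
  console.log(deployment.id, deployment.rolloutState, deployment.rolloutStateReason);
}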
@@ -1991,16 +1997,16 @@ export namespace Deployment {
}
/**
- * Details on an event associated with a service.
+ *The details for an event that's associated with a service.
*/ export interface ServiceEvent { /** - *The ID string of the event.
+ *The ID string for the event.
*/ id?: string; /** - *The Unix timestamp for when the event was triggered.
+ *The Unix timestamp for the time when the event was triggered.
*/ createdAt?: Date; @@ -2082,15 +2088,15 @@ export interface TaskSet { clusterArn?: string; /** - *The tag specified when a task set is started. If the task set is created by an CodeDeploy
- * deployment, the startedBy
parameter is CODE_DEPLOY
. For a task
- * set created for an external deployment, the startedBy field isn't used.
The tag specified when a task set is started. If a CodeDeploy deployment created the task
+ * set, the startedBy
parameter is CODE_DEPLOY
. If an external
+ * deployment created the task set, the startedBy field isn't used.
The external ID associated with the task set.
- *If a task set is created by an CodeDeploy deployment, the externalId
parameter
+ *
If a CodeDeploy deployment created a task set, the externalId
parameter
* contains the CodeDeploy deployment ID.
If a task set is created for an external deployment and is associated with a service
* discovery registry, the externalId
parameter contains the
@@ -2099,7 +2105,7 @@ export interface TaskSet {
externalId?: string;
/**
- *
The status of the task set. The following describes each state:
+ *The status of the task set. The following describes each state.
*The task set is not serving production traffic.
+ *The task set isn't serving production traffic.
*The tasks in the task set are being stopped and their corresponding + *
The tasks in the task set are being stopped, and their corresponding * targets are being deregistered from their target group.
*The task definition the task set is using.
+ *The task definition that the task set is using.
*/ taskDefinition?: string; @@ -2135,7 +2141,7 @@ export interface TaskSet { *The number of tasks in the task set that are in the PENDING
status during
* a deployment. A task in the PENDING
state is preparing to enter the
* RUNNING
state. A task set enters the PENDING
status when
- * it launches for the first time or when it is restarted after being in the
+ * it launches for the first time or when it's restarted after being in the
* STOPPED
state.
The Unix timestamp for when the task set was created.
+ *The Unix timestamp for the time when the task set was created.
*/ createdAt?: Date; /** - *The Unix timestamp for when the task set was last updated.
+ *The Unix timestamp for the time when the task set was last updated.
*/ updatedAt?: Date; @@ -2164,20 +2170,20 @@ export interface TaskSet { launchType?: LaunchType | string; /** - *The capacity provider strategy associated with the task set.
+ *The capacity provider strategy that's associated with the task set.
*/ capacityProviderStrategy?: CapacityProviderStrategyItem[]; /** - *The Fargate platform version on which the tasks in the task set are running. A - * platform version is only specified for tasks run on Fargate. For more information, see - * Fargate platform - * versions in the Amazon Elastic Container Service Developer Guide.
+ *The Fargate platform version where the tasks in the task set are running. A platform + * version is only specified for tasks run on Fargate. For more information, see Fargate platform versions in the + * Amazon Elastic Container Service Developer Guide.
*/ platformVersion?: string; /** - *The operating system that your tasks in the set are running on. A platform family is specified only for tasks using the Fargate launch type.
+ *The operating system that your tasks in the set are running on. A platform family is + * specified only for tasks that use the Fargate launch type.
*All tasks in the set must have the same value.
*/ platformFamily?: string; @@ -2188,26 +2194,26 @@ export interface TaskSet { networkConfiguration?: NetworkConfiguration; /** - *Details on a load balancer that is used with a task set.
+ *Details on a load balancer that's used with a task set.
*/ loadBalancers?: LoadBalancer[]; /** - *The details of the service discovery registries to assign to this task set. For more + *
The details for the service discovery registries to assign to this task set. For more * information, see Service * discovery.
*/ serviceRegistries?: ServiceRegistry[]; /** - *A floating-point percentage of the desired number of tasks to place and keep running + *
A floating-point percentage of your desired number of tasks to place and keep running * in the task set.
*/ scale?: Scale; /** - *The stability status, which indicates whether the task set has reached a steady state. - * If the following conditions are met, the task set will be in + *
The stability status. This indicates whether the task set has reached a steady state.
+ * If the following conditions are met, the task set is in
* STEADY_STATE
:
The pendingCount
is 0
.
There are no tasks running on container instances in the DRAINING
- * status.
There are no tasks that are running on container instances in the
+ * DRAINING
status.
All tasks are reporting a healthy status from the load balancers, service * discovery, and container health checks.
*If any of those conditions are not met, the stability status returns + *
If any of those conditions aren't met, the stability status returns
* STABILIZING
.
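A sketch of checking a task set's stability against the conditions above; the names are hypothetical:

import { ECSClient, DescribeTaskSetsCommand } from "@aws-sdk/client-ecs";

const ecs = new ECSClient({});
const { taskSets } = await ecs.send(
  new DescribeTaskSetsCommand({ cluster: "demo-cluster", service: "demo-service" })
);
for (const taskSet of taskSets ?? []) {
  // STEADY_STATE once all of the conditions above hold; otherwise STABILIZING.
  console.log(taskSet.id, taskSet.stabilityStatus, taskSet.stabilityStatusAt);
}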
The Unix timestamp for when the task set stability status was retrieved.
+ *The Unix timestamp for the time when the task set stability status was + * retrieved.
*/ stabilityStatusAt?: Date; /** *The metadata that you apply to the task set to help you categorize and organize them. - * Each tag consists of a key and an optional value, both of which you define.
+ * Each tag consists of a key and an optional value. You define both. *The following basic restrictions apply to tags:
*The name of your service. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. Service names must be unique within - * a cluster, but you can have similarly named services in multiple clusters within a + * a cluster. However, you can have similarly named services in multiple clusters within a * Region or across multiple Regions.
*/ serviceName?: string; @@ -2305,14 +2312,14 @@ export interface Service { clusterArn?: string; /** - *A list of Elastic Load Balancing load balancer objects, containing the load balancer name, the - * container name (as it appears in a container definition), and the container port to - * access from the load balancer.
+ *A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the + * container name, and the container port to access from the load balancer. The container + * name is as it appears in a container definition.
*/ loadBalancers?: LoadBalancer[]; /** - *The details of the service discovery registries to assign to this service. For more + *
The details for the service discovery registries to assign to this service. For more * information, see Service * Discovery.
*/ @@ -2347,23 +2354,25 @@ export interface Service { launchType?: LaunchType | string; /** - *The capacity provider strategy the service is using. When using the DescribeServices - * API, this field is omitted if the service was created using a launch type.
+ *The capacity provider strategy the service uses. When using the DescribeServices API, + * this field is omitted if the service was created using a launch type.
*/ capacityProviderStrategy?: CapacityProviderStrategyItem[]; /** - *The platform version on which to run your service. A platform version is only
- * specified for tasks hosted on Fargate. If one is not specified, the
- * The platform version to run your service on. A platform version is only specified for
+ * tasks that are hosted on Fargate. If one isn't specified, the The operating system that your tasks in the service are running on. A platform family is specified only for tasks using the Fargate launch type. All tasks that run as part of this service must use the same The operating system that your tasks in the service run on. A platform family is
+ * specified only for tasks using the Fargate launch type. All tasks that run as part of this service must use the same
+ * The ARN of the IAM role associated with the service that allows the Amazon ECS container
- * agent to register container instances with an Elastic Load Balancing load balancer. The ARN of the IAM role that's associated with the service. It allows the Amazon ECS
+ * container agent to register container instances with an Elastic Load Balancing load balancer. The Unix timestamp for when the service was created. The Unix timestamp for the time when the service was created. The scheduling strategy to use for the service. For more information, see Services. There are two service scheduler strategies available: There are two service scheduler strategies available.
@@ -2446,12 +2455,14 @@ export interface Service {
*
* LATEST
platform version is used by default. For more information, see
- * Fargate Platform
+ * LATEST
+ * platform version is used. For more information, see Fargate Platform
* Versions in the Amazon Elastic Container Service Developer Guide.platformFamily
value as the service, for example, LINUX
.platformFamily
value as the service (for example,
+ * LINUX
).
*
DAEMON
-The daemon scheduling strategy deploys exactly one
- * task on each active container instance that meets all of the task placement
- * constraints that you specify in your cluster. The service scheduler also
- * evaluates the task placement constraints for running tasks and will stop tasks
- * that do not meet the placement constraints.
Fargate tasks do not support the DAEMON
+ *
Fargate tasks don't support the DAEMON
* scheduling strategy.
The deployment controller type the service is using. When using the DescribeServices
- * API, this field is omitted if the service is using the ECS
deployment
+ * API, this field is omitted if the service uses the ECS
deployment
* controller type.
The metadata that you apply to the service to help you categorize and organize them. - * Each tag consists of a key and an optional value, both of which you define.
+ * Each tag consists of a key and an optional value. You define bot the key and + * value. *The following basic restrictions apply to tags:
*Specifies whether to enable Amazon ECS managed tags for the tasks in the service. For more + *
Determines whether to enable Amazon ECS managed tags for the tasks in the service. For more * information, see Tagging Your Amazon ECS * Resources in the Amazon Elastic Container Service Developer Guide.
*/ enableECSManagedTags?: boolean; /** - *Specifies whether to propagate the tags from the task definition or the service to the - * task. If no value is specified, the tags are not propagated.
+ *Determines whether to propagate the tags from the task definition or the service to + * the task. If no value is specified, the tags aren't propagated.
*/ propagateTags?: PropagateTags | string; /** - *Whether or not the execute command functionality is enabled for the service. If + *
Determines whether the execute command functionality is enabled for the service. If
* true
, the execute command functionality is enabled for all containers
* in tasks as part of the service.
The full description of your service following the create call.
*A service will return either a capacityProviderStrategy
or
- * launchType
parameter, but not both, depending on which one was
- * specified during creation.
launchType
parameter, but not both, depending where one was specified
+ * when it was created.
* If a service is using the ECS
deployment controller, the
* deploymentController
and taskSets
parameters will not be
* returned.
If the service is using the CODE_DEPLOY
deployment controller, the
+ *
if the service uses the CODE_DEPLOY
deployment controller, the
* deploymentController
, taskSets
and
* deployments
parameters will be returned, however the
* deployments
parameter will be an empty list.
The specified platform version does not satisfy the task definition's required - * capabilities.
+ *The specified platform version doesn't satisfy the required capabilities of the task + * definition.
*/ export interface PlatformTaskDefinitionIncompatibilityException extends __SmithyException, $MetadataBearer { name: "PlatformTaskDefinitionIncompatibilityException"; @@ -2584,7 +2596,7 @@ export namespace PlatformTaskDefinitionIncompatibilityException { } /** - *The specified platform version does not exist.
+ *The specified platform version doesn't exist.
*/ export interface PlatformUnknownException extends __SmithyException, $MetadataBearer { name: "PlatformUnknownException"; @@ -2602,7 +2614,7 @@ export namespace PlatformUnknownException { } /** - *The specified task is not supported in this Region.
+ *The specified task isn't supported in this Region.
*/ export interface UnsupportedFeatureException extends __SmithyException, $MetadataBearer { name: "UnsupportedFeatureException"; @@ -2663,8 +2675,8 @@ export interface CreateTaskSetRequest { serviceRegistries?: ServiceRegistry[]; /** - *The launch type that new tasks in the task set will use. For more information, see - * Amazon ECS Launch Types in the Amazon Elastic Container Service Developer Guide.
+ *The launch type that new tasks in the task set uses. For more information, see Amazon ECS + * Launch Types in the Amazon Elastic Container Service Developer Guide.
*If a launchType
is specified, the capacityProviderStrategy
* parameter must be omitted.
The platform version that the tasks in the task set should use. A platform version is + *
The platform version that the tasks in the task set uses. A platform version is
* specified only for tasks using the Fargate launch type. If one isn't
- * specified, the LATEST
platform version is used by default.
LATEST
platform version is used.
*/
platformVersion?: string;
@@ -2707,16 +2719,16 @@ export interface CreateTaskSetRequest {
scale?: Scale;
/**
- * Unique, case-sensitive identifier that you provide to ensure the idempotency of the - * request. Up to 32 ASCII characters are allowed.
+ *The identifier that you provide to ensure the idempotency of the request. It's case + * sensitive and must be unique. It can be up to 32 ASCII characters are allowed.
*/ clientToken?: string; /** *The metadata that you apply to the task set to help you categorize and organize them. - * Each tag consists of a key and an optional value, both of which you define. When a - * service is deleted, the tags are deleted as well.
- *The following basic restrictions apply to tags:
+ * Each tag consists of a key and an optional value. You define both. When a service is + * deleted, the tags are deleted. + *The following basic restrictions apply to tags:
*Maximum number of tags per resource - 50
@@ -2780,8 +2792,8 @@ export namespace CreateTaskSetResponse { } /** - *The specified service is not active. You can't update a service that is inactive. If - * you have previously deleted a service, you can re-create it with CreateService.
+ *The specified service isn't active. You can't update a service that's inactive. If you + * have previously deleted a service, you can re-create it with CreateService.
*/ export interface ServiceNotActiveException extends __SmithyException, $MetadataBearer { name: "ServiceNotActiveException"; @@ -2799,9 +2811,8 @@ export namespace ServiceNotActiveException { } /** - *The specified service could not be found. You can view your available services with - * ListServices. Amazon ECS services are cluster-specific and - * Region-specific.
+ *The specified service wasn't found. You can view your available services with ListServices. Amazon ECS services are cluster specific and Region + * specific.
*/ export interface ServiceNotFoundException extends __SmithyException, $MetadataBearer { name: "ServiceNotFoundException"; @@ -2828,7 +2839,7 @@ export enum SettingName { export interface DeleteAccountSettingRequest { /** - *The resource name for which to disable the account setting. If + *
The resource name to disable the account setting for. If
* serviceLongArnFormat
is specified, the ARN for your Amazon ECS services is
* affected. If taskLongArnFormat
is specified, the ARN and resource ID for
* your Amazon ECS tasks is affected. If containerInstanceLongArnFormat
is
@@ -2839,11 +2850,11 @@ export interface DeleteAccountSettingRequest {
name: SettingName | string | undefined;
/**
- *
The ARN of the principal, which can be an IAM user, IAM role, or the root user. If you - * specify the root user, it disables the account setting for all IAM users, IAM roles, and - * the root user of the account unless an IAM user or role explicitly overrides these - * settings. If this field is omitted, the setting is changed only for the authenticated - * user.
+ *The Amazon Resource Name (ARN) of the principal. It can be an IAM user, IAM role, or + * the root user. If you specify the root user, it disables the account setting for all IAM + * users, IAM roles, and the root user of the account unless an IAM user or role explicitly + * overrides these settings. If this field is omitted, the setting is changed only for the + * authenticated user.
*/ principalArn?: string; } @@ -2867,13 +2878,14 @@ export interface Setting { name?: SettingName | string; /** - *Whether the account setting is enabled or disabled for the specified resource.
+ *Determines whether the account setting is enabled or disabled for the specified + * resource.
*/ value?: string; /** - *The ARN of the principal, which can be an IAM user, IAM role, or the root user. If - * this field is omitted, the authenticated user is assumed.
+ *The ARN of the principal. It can be an IAM user, IAM role, or the root user. If this + * field is omitted, the authenticated user is assumed.
*/ principalArn?: string; } @@ -2908,29 +2920,29 @@ export enum TargetType { } /** - *An attribute is a name-value pair associated with an Amazon ECS object. Attributes enable - * you to extend the Amazon ECS data model by adding custom metadata to your resources. For more - * information, see Attributes in the Amazon Elastic Container Service Developer Guide.
+ *An attribute is a name-value pair that's associated with an Amazon ECS object. Attributes + * enable you to extend the Amazon ECS data model by adding custom metadata to your resources. + * For more information, see Attributes in the Amazon Elastic Container Service Developer Guide.
*/ export interface Attribute { /** *The name of the attribute. The name
must contain between 1 and 128
- * characters and name may contain letters (uppercase and lowercase), numbers, hyphens,
- * underscores, forward slashes, back slashes, or periods.
The value of the attribute. The value
must contain between 1 and 128
- * characters and may contain letters (uppercase and lowercase), numbers, hyphens,
- * underscores, periods, at signs (@), forward slashes, back slashes, colons, or spaces.
- * The value cannot contain any leading or trailing whitespace.
The type of the target with which to attach the attribute. This parameter is required - * if you use the short form ID for a resource instead of the full ARN.
+ *The type of the target to attach the attribute with. This parameter is required if you + * use the short form ID for a resource instead of the full ARN.
*/ targetType?: TargetType | string; @@ -2958,8 +2970,8 @@ export interface DeleteAttributesRequest { cluster?: string; /** - *The attributes to delete from your resource. You can specify up to 10 attributes per - * request. For custom attributes, specify the attribute name and target ID, but do not + *
The attributes to delete from your resource. You can specify up to 10 attributes for + * each request. For custom attributes, specify the attribute name and target ID, but don't * specify the value. If you specify the target ID using the short form, you must also * specify the target type.
*/ @@ -2992,8 +3004,8 @@ export namespace DeleteAttributesResponse { } /** - *The specified target could not be found. You can view your available container - * instances with ListContainerInstances. Amazon ECS container instances are + *
The specified target wasn't found. You can view your available container instances + * with ListContainerInstances. Amazon ECS container instances are * cluster-specific and Region-specific.
*/ export interface TargetNotFoundException extends __SmithyException, $MetadataBearer { @@ -3044,7 +3056,7 @@ export namespace DeleteCapacityProviderResponse { } /** - *You cannot delete a cluster that has registered container instances. First, deregister + *
You can't delete a cluster that has registered container instances. First, deregister * the container instances before you can delete the cluster. For more information, see * DeregisterContainerInstance.
*/ @@ -3064,9 +3076,9 @@ export namespace ClusterContainsContainerInstancesException { } /** - *You cannot delete a cluster that contains services. First, update the service to - * reduce its desired task count to 0 and then delete the service. For more information, - * see UpdateService and DeleteService.
+ *You can't delete a cluster that contains services. First, update the service to reduce + * its desired task count to 0, and then delete the service. For more information, see + * UpdateService and DeleteService.
*/ export interface ClusterContainsServicesException extends __SmithyException, $MetadataBearer { name: "ClusterContainsServicesException"; @@ -3084,7 +3096,7 @@ export namespace ClusterContainsServicesException { } /** - *You cannot delete a cluster that has active tasks.
+ *You can't delete a cluster that has active tasks.
*/ export interface ClusterContainsTasksException extends __SmithyException, $MetadataBearer { name: "ClusterContainsTasksException"; @@ -3146,9 +3158,9 @@ export interface DeleteServiceRequest { service: string | undefined; /** - *If true
, allows you to delete a service even if it has not been scaled
- * down to zero tasks. It is only necessary to use this if the service is using the
- * REPLICA
scheduling strategy.
If true
, allows you to delete a service even if it wasn't scaled down to
+ * zero tasks. It's only necessary to use this if the service uses the REPLICA
+ * scheduling strategy.
The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task - * set exists in to delete.
+ * set is found in to delete. */ cluster: string | undefined; @@ -3197,8 +3209,8 @@ export interface DeleteTaskSetRequest { taskSet: string | undefined; /** - *If true
, this allows you to delete a task set even if it hasn't been
- * scaled down to zero.
If true
, you can delete a task set even if it hasn't been scaled down to
+ * zero.
The specified task set could not be found. You can view your available task sets with - * DescribeTaskSets. Task sets are specific to each cluster, service - * and Region.
+ *The specified task set wasn't found. You can view your available task sets with DescribeTaskSets. Task sets are specific to each cluster, service and + * Region.
*/ export interface TaskSetNotFoundException extends __SmithyException, $MetadataBearer { name: "TaskSetNotFoundException"; @@ -3262,12 +3273,12 @@ export interface DeregisterContainerInstanceRequest { containerInstance: string | undefined; /** - *Forces the deregistration of the container instance. If you have tasks running on the + *
Forces the container instance to be deregistered. If you have tasks running on the
* container instance when you deregister it with the force
option, these
* tasks remain running until you terminate the instance or the tasks stop through some
- * other means, but they are orphaned (no longer monitored or accounted for by Amazon ECS). If
- * an orphaned task on your container instance is part of an Amazon ECS service, then the
- * service scheduler starts another copy of that task, on a different container instance if
+ * other means, but they're orphaned (no longer monitored or accounted for by Amazon ECS). If an
+ * orphaned task on your container instance is part of an Amazon ECS service, then the service
+ * scheduler starts another copy of that task, on a different container instance if
* possible.
Any containers in orphaned service tasks that are registered with a Classic Load Balancer or an Application Load Balancer * target group are deregistered. They begin connection draining according to the settings @@ -3285,6 +3296,78 @@ export namespace DeregisterContainerInstanceRequest { }); } +export enum InstanceHealthCheckState { + IMPAIRED = "IMPAIRED", + INITIALIZING = "INITIALIZING", + INSUFFICIENT_DATA = "INSUFFICIENT_DATA", + OK = "OK", +} + +export enum InstanceHealthCheckType { + CONTAINER_RUNTIME = "CONTAINER_RUNTIME", +} + +/** + *
An object representing the result of a container instance health status check.
+ */ +export interface InstanceHealthCheckResult { + /** + *The type of container instance health status that was verified.
+ */ + type?: InstanceHealthCheckType | string; + + /** + *The container instance health status.
+ */ + status?: InstanceHealthCheckState | string; + + /** + *The Unix timestamp for when the container instance health status was last + * updated.
+ */ + lastUpdated?: Date; + + /** + *The Unix timestamp for when the container instance health status last changed.
+ */ + lastStatusChange?: Date; +} + +export namespace InstanceHealthCheckResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InstanceHealthCheckResult): any => ({ + ...obj, + }); +} + +/** + *An object representing the health status of the container instance.
+ */ +export interface ContainerInstanceHealthStatus { + /** + *The overall health status of the container instance. This is an aggregate status of + * all container instance health checks.
+ */ + overallStatus?: InstanceHealthCheckState | string; + + /** + *An array of objects representing the details of the container instance health + * status.
+ */ + details?: InstanceHealthCheckResult[]; +} + +export namespace ContainerInstanceHealthStatus { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ContainerInstanceHealthStatus): any => ({ + ...obj, + }); +} + /** *Describes the resources available for a container instance.
*/ @@ -3296,7 +3379,7 @@ export interface Resource { name?: string; /** - *The type of the resource, such as INTEGER
, DOUBLE
,
+ *
The type of the resource. Valid values: INTEGER
, DOUBLE
,
* LONG
, or STRINGSET
.
The Docker version running on the container instance.
+ *The Docker version that's running on the container instance.
*/ dockerVersion?: string; } @@ -3367,7 +3450,7 @@ export namespace VersionInfo { } /** - *An EC2 instance that is running the Amazon ECS agent and has been registered with a + *
An EC2 instance that's running the Amazon ECS agent and has been registered with a * cluster.
*/ export interface ContainerInstance { @@ -3383,14 +3466,14 @@ export interface ContainerInstance { ec2InstanceId?: string; /** - *The capacity provider associated with the container instance.
+ *The capacity provider that's associated with the container instance.
*/ capacityProviderName?: string; /** *The version counter for the container instance. Every time a container instance
* experiences a change that triggers a CloudWatch event, the version counter is
- * incremented. If you are replicating your Amazon ECS container instance state with CloudWatch
+ * incremented. If you're replicating your Amazon ECS container instance state with CloudWatch
* Events, you can compare the version of a container instance reported by the Amazon ECS APIs
* with the version reported in CloudWatch Events for the container instance (inside the
* detail
object) to verify that the version in your event stream is
@@ -3406,12 +3489,11 @@ export interface ContainerInstance {
/**
*
For CPU and memory resource types, this parameter describes the remaining CPU and
- * memory that has not already been allocated to tasks and is therefore available for new
- * tasks. For port resource types, this parameter describes the ports that were reserved by
- * the Amazon ECS container agent (at instance registration time) and any task containers that
- * have reserved port mappings on the host (with the host
or
- * bridge
network mode). Any port that is not specified here is available
- * for new tasks.
host
or bridge
+ * network mode). Any port that's not specified here is available for new tasks.
*/
remainingResources?: Resource[];
@@ -3439,7 +3521,7 @@ export interface ContainerInstance {
* network interface is deprovisioned. The instance then transitions to an
* INACTIVE
status.
* The ACTIVE
status indicates that the container instance can accept tasks.
- * The DRAINING
indicates that new tasks are not placed on the container
+ * The DRAINING
indicates that new tasks aren't placed on the container
* instance and any service tasks running on the container instance are removed if
* possible. For more information, see Container Instance Draining in the
* Amazon Elastic Container Service Developer Guide.
The status of the most recent agent update. If an update has never been requested,
- * this value is NULL
.
The status of the most recent agent update. If an update wasn't ever requested, this
+ * value is NULL
.
The Unix timestamp for when the container instance was registered.
+ *The Unix timestamp for the time when the container instance was registered.
*/ registeredAt?: Date; @@ -3497,9 +3579,8 @@ export interface ContainerInstance { /** *The metadata that you apply to the container instance to help you categorize and - * organize them. Each tag consists of a key and an optional value, both of which you - * define.
- *The following basic restrictions apply to tags:
+ * organize them. Each tag consists of a key and an optional value. You define both. + *The following basic restrictions apply to tags:
*Maximum number of tags per resource - 50
@@ -3532,6 +3613,11 @@ export interface ContainerInstance { *An object representing the health status of the container instance.
+ */ + healthStatus?: ContainerInstanceHealthStatus; } export namespace ContainerInstance { @@ -3598,21 +3684,20 @@ export enum ContainerCondition { * to enable container dependencies. However, we recommend using the latest container agent * version. For information about checking your agent version and updating to the latest * version, see Updating the Amazon ECS - * Container Agent in the Amazon Elastic Container Service Developer Guide. If you are - * using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the + * Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using + * an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the *ecs-init
package. If your container instances are launched from version
* 20190301
or later, then they contain the required versions of the
* container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
* For tasks using the Fargate launch type, the task or service requires the followiwng platforms:
+ *For tasks that use the Fargate launch type, the task or service + * requires the following platforms:
*Linux platform
- * version 1.3.0
or later.
Linux platform version 1.3.0
or later.
Windows platform
- * version 1.0.0
or later.
Windows platform version 1.0.0
or later.
COMPLETE
- This condition validates that a dependent
* container runs to completion (exits) before permitting other containers to
* start. This can be useful for nonessential containers that run a script and then
- * exit. This condition cannot be set on an essential container.
+ * exit. This condition can't be set on an essential container.
*
* SUCCESS
- This condition is the same as
* COMPLETE
, but it also requires that the container exits with a
- * zero
status. This condition cannot be set on an essential
+ * zero
status. This condition can't be set on an essential
* container.
.env
file
* extension. Each line in an environment file should contain an environment variable in
* VARIABLE=VALUE
format. Lines beginning with #
are treated
- * as comments and are ignored. For more information on the environment variable file
+ * as comments and are ignored. For more information about the environment variable file
* syntax, see Declare default
* environment variables in file.
* If there are environment variables specified using the environment
* parameter in a container definition, they take precedence over the variables contained
* within an environment file. If multiple environment files are specified that contain the
- * same variable, they are processed from the top down. It is recommended to use unique
+ * same variable, they're processed from the top down. We recommend that you use unique
* variable names. For more information, see Specifying environment
* variables in the Amazon Elastic Container Service Developer Guide.
This parameter is only supported for tasks hosted on Fargate using the following platform versions:
+ *This parameter is only supported for tasks hosted on Fargate using the + * following platform versions:
*Linux platform
- * version 1.4.0
or later.
Linux platform version 1.4.0
or later.
Windows platform
- * version 1.0.0
or later.
Windows platform version 1.0.0
or later.
* UNKNOWN
-The container health check is being evaluated or
- * there is no container health check defined.
The following describes the possible healthStatus
values for a task. The
@@ -3846,13 +3930,13 @@ export namespace FirelensConfiguration {
* Amazon ECS Container Agent.
Container health checks are supported for Fargate tasks if you
- * are using platform version 1.1.0 or greater. For more information, see Fargate
+ * Container health checks are supported for Fargate tasks if
+ * you're using platform version 1.1.0 or greater. For more information, see Fargate
* Platform Versions.
Container health checks are not supported for tasks that are part of a service - * that is configured to use a Classic Load Balancer.
+ *Container health checks aren't supported for tasks that are part of a service + * that's configured to use a Classic Load Balancer.
*CMD
to execute the command
* arguments directly, or CMD-SHELL
to run the command with the container's
* default shell.
- * When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, you should enclose the list of commands in brackets, as shown below.
+ *When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list + * of commands in brackets.
*
* [ "CMD-SHELL", "curl -f http://localhost/ || exit 1" ]
*
You do not need to include the brackets when you use the Amazon Web Services Management Consoleas shown below.
+ *You don't need to include the brackets when you use the Amazon Web Services Management Console.
*
* "CMD-SHELL", "curl -f http://localhost/ || exit 1"
*
The optional grace period within which to provide containers time to bootstrap before
- * failed health checks count towards the maximum number of retries. You may specify
- * between 0 and 300 seconds. The startPeriod
is disabled by default.
The optional grace period to provide containers time to bootstrap before failed health
+ * checks count towards the maximum number of retries. You can specify between 0 and 300
+ * seconds. By default, the startPeriod
is disabled.
If a health check succeeds within the startPeriod
, then the container
* is considered healthy and any subsequent failures count toward the maximum number of
@@ -3919,10 +4004,10 @@ export namespace HealthCheck {
/**
*
The Linux capabilities for the container that are added to or dropped from the default - * configuration provided by Docker. For more information on the default capabilities and - * the non-default available capabilities, see Runtime privilege and Linux capabilities in the Docker run - * reference. For more detailed information on these Linux capabilities, see - * the capabilities(7) Linux manual page.
+ * configuration provided by Docker. For more information about the default capabilities + * and the non-default available capabilities, see Runtime privilege and Linux capabilities in the Docker run + * reference. For more detailed information about these Linux capabilities, + * see the capabilities(7) Linux manual page. */ export interface KernelCapabilities { /** @@ -4069,8 +4154,8 @@ export interface LinuxParameters { *Devices
in the Create a container section of the
* Docker Remote API and the --device
option to docker run.
* If you are using tasks that use the Fargate launch type, the
- * devices
parameter is not supported.
If you're using tasks that use the Fargate launch type, the
+ * devices
parameter isn't supported.
The container path, mount options, and size (in MiB) of the tmpfs mount. This
* parameter maps to the --tmpfs
option to docker run.
If you are using tasks that use the Fargate launch type, the
- * tmpfs
parameter is not supported.
If you're using tasks that use the Fargate launch type, the
+ * tmpfs
parameter isn't supported.
maxSwap
value
* must be set for the swappiness
parameter to be used.
* If you are using tasks that use the Fargate launch type, the
- * maxSwap
parameter is not supported.
If you're using tasks that use the Fargate launch type, the
+ * maxSwap
parameter isn't supported.
maxSwap
then this parameter is ignored. This parameter maps to the
* --memory-swappiness
option to docker run.
* If you are using tasks that use the Fargate launch type, the
- * swappiness
parameter is not supported.
If you're using tasks that use the Fargate launch type, the
+ * swappiness
parameter isn't supported.
The secret to expose to the container. The supported values are either the full ARN of * the Secrets Manager secret or the full ARN of the parameter in the SSM Parameter Store.
*If the SSM Parameter Store parameter exists in the same Region as the task you - * are launching, then you can use either the full ARN or name of the parameter. If the - * parameter exists in a different Region, then the full ARN must be specified.
+ *If the SSM Parameter Store parameter exists in the same Region as the task + * you're launching, then you can use either the full ARN or name of the parameter. If + * the parameter exists in a different Region, then the full ARN must be + * specified.
*docker
* run
* .
- * By default, containers use the same logging driver that the Docker daemon uses; - * however the container may use a different logging driver than the Docker daemon by + *
By default, containers use the same logging driver that the Docker daemon uses. + * However, the container might use a different logging driver than the Docker daemon by * specifying a log driver configuration in the container definition. For more information - * on the options for different supported log drivers, see Configure logging + * about the options for different supported log drivers, see Configure logging * drivers in the Docker documentation.
- *The following should be noted when specifying a log configuration for your - * containers:
+ *Understand the following when specifying a log configuration for your + * containers.
*Amazon ECS currently supports a subset of the logging drivers available to the @@ -4224,17 +4310,17 @@ export namespace Secret { * your container instance.
*For tasks hosted on Amazon EC2 instances, the Amazon ECS container agent must register - * the available logging drivers with the + *
For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must
+ * register the available logging drivers with the
* ECS_AVAILABLE_LOGGING_DRIVERS
environment variable before
* containers placed on that instance can use these log configuration options. For
* more information, see Amazon ECS container agent configuration in the
* Amazon Elastic Container Service Developer Guide.
For tasks on Fargate, because you do not have access to the underlying - * infrastructure your tasks are hosted on, any additional software needed will - * have to be installed outside of the task. For example, the Fluentd output + *
For tasks that are on Fargate, because you don't have access to the + * underlying infrastructure your tasks are hosted on, any additional software + * needed must be installed outside of the task. For example, the Fluentd output * aggregators or a remote host running Logstash to send Gelf logs to.
*For more information about using the awsfirelens
log driver, see Custom log routing in the Amazon Elastic Container Service Developer Guide.
If you have a custom driver that is not listed, you can fork the Amazon ECS container
- * agent project that is available
+ * If you have a custom driver that isn't listed, you can fork the Amazon ECS container
+ * agent project that's available
* on GitHub and customize it to work with that driver. We encourage you to
* submit pull requests for changes that you would like to have included. However, we
- * do not currently provide support for running modified copies of this
- * software.
Details on a volume mount point that is used in a container definition.
+ *Details for a volume mount point that's used in a container definition.
*/ export interface MountPoint { /** @@ -4326,13 +4411,13 @@ export enum TransportProtocol { *Port mappings allow containers to access ports on the host container instance to send * or receive traffic. Port mappings are specified as part of the container * definition.
- *If you are using containers in a task with the awsvpc
or
- * host
network mode, exposed ports should be specified using
- * containerPort
. The hostPort
can be left blank or it must
- * be the same value as the containerPort
.
If you use containers in a task with the awsvpc
or host
+ * network mode, specify the exposed ports using containerPort
. The
+ * hostPort
can be left blank or it must be the same value as the
+ * containerPort
.
You cannot expose the same container port for multiple protocols. An error will be - * returned if this is attempted
+ *You can't expose the same container port for multiple protocols. If you attempt + * this, an error is returned.
*After a task reaches the RUNNING
status, manual and automatic host and
* container port assignments are visible in the networkBindings
section of
@@ -4340,27 +4425,26 @@ export enum TransportProtocol {
*/
export interface PortMapping {
/**
- *
The port number on the container that is bound to the user-specified or automatically + *
The port number on the container that's bound to the user-specified or automatically * assigned host port.
- *If you are using containers in a task with the awsvpc
or
- * host
network mode, exposed ports should be specified using
- * containerPort
.
If you are using containers in a task with the bridge
network mode and
- * you specify a container port and not a host port, your container automatically receives
- * a host port in the ephemeral port range. For more information, see
- * hostPort
. Port mappings that are automatically assigned in this way do not
- * count toward the 100 reserved ports limit of a container instance.
If you use containers in a task with the awsvpc
or host
+ * network mode, specify the exposed ports using containerPort
.
If you use containers in a task with the bridge
network mode and you
+ * specify a container port and not a host port, your container automatically receives a
+ * host port in the ephemeral port range. For more information, see hostPort
.
+ * Port mappings that are automatically assigned in this way do not count toward the 100
+ * reserved ports limit of a container instance.
The port number on the container instance to reserve for your container.
- *If you are using containers in a task with the awsvpc
or
- * host
network mode, the hostPort
can either be left blank
- * or set to the same value as the containerPort
.
If you are using containers in a task with the bridge
network mode, you
- * can specify a non-reserved host port for your container port mapping, or you can omit
- * the hostPort
(or set it to 0
) while specifying a
+ *
If you use containers in a task with the awsvpc
or host
+ * network mode, the hostPort
can either be left blank or set to the same
+ * value as the containerPort
.
If you use containers in a task with the bridge
network mode, you can
+ * specify a non-reserved host port for your container port mapping, or you can omit the
+ * hostPort
(or set it to 0
) while specifying a
* containerPort
and your container automatically receives a port in the
* ephemeral port range for your container instance operating system and Docker
* version.
The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the
* Amazon ECS container agent ports 51678-51680. Any host port that was previously specified in
- * a running task is also reserved while the task is running (after a task stops, the host
- * port is released). The current reserved ports are displayed in the
+ * a running task is also reserved while the task is running. That is, after a task stops,
+ * the host port is released. The current reserved ports are displayed in the
* remainingResources
of DescribeContainerInstances
- * output. A container instance can have up to 100 reserved ports at a time, including the
- * default reserved ports. Automatically assigned ports don't count toward the 100 reserved
- * ports limit.
The Amazon Resource Name (ARN) of the secret containing the private repository * credentials.
*When you are using the Amazon ECS API, CLI, or Amazon Web Services SDK, if the secret exists in the - * same Region as the task that you are launching then you can use either the full ARN - * or the name of the secret. When you are using the Amazon Web Services Management Console, you must specify the - * full ARN of the secret.
+ *When you use the Amazon ECS API, CLI, or Amazon Web Services SDK, if the secret exists in the same + * Region as the task that you're launching then you can use either the full ARN or the + * name of the secret. When you use the Amazon Web Services Management Console, you must specify the full ARN of the + * secret.
*The type and amount of a resource to assign to a container. The supported resource * types are GPUs and Elastic Inference accelerators. For more information, see Working with - * GPUs on Amazon ECS or Working with Amazon Elastic Inference on Amazon ECS in the + * GPUs on Amazon ECS or Working with + * Amazon Elastic Inference on Amazon ECS in the * Amazon Elastic Container Service Developer Guide *
*/ @@ -4443,12 +4528,12 @@ export interface ResourceRequirement { /** *The value for the specified resource type.
*If the GPU
type is used, the value is the number of physical
- * GPUs
the Amazon ECS container agent will reserve for the container. The
- * number of GPUs reserved for all containers in a task should not exceed the number of
- * available GPUs on the container instance the task is launched on.
If the InferenceAccelerator
type is used, the value
should
- * match the deviceName
for an InferenceAccelerator
- * specified in a task definition.
GPUs
the Amazon ECS container agent reserves for the container. The number
+ * of GPUs that's reserved for all containers in a task can't exceed the number of
+ * available GPUs on the container instance that the task is launched on.
+ * If the InferenceAccelerator
type is used, the value
matches
+ * the deviceName
for an InferenceAccelerator specified in a
+ * task definition.
A list of namespaced kernel parameters to set in the container. This parameter maps to
* Sysctls
in the Create a container section of the
* Docker Remote API and the --sysctl
option to docker run.
It is not recommended that you specify network-related systemControls
- * parameters for multiple containers in a single task that also uses either the
- * awsvpc
or host
network mode for the following
- * reasons:
We don't recommend that you specify network-related systemControls
+ * parameters for multiple containers in a single task. This task also uses either the
+ * awsvpc
or host
network mode. It does it for the following
+ * reasons.
For tasks that use the awsvpc
network mode, if you set
* systemControls
for any container, it applies to all containers
* in the task. If you set different systemControls
for multiple
- * containers in a single task, the container that is started last determines which
+ * containers in a single task, the container that's started last determines which
* systemControls
take effect.
For tasks that use the host
network mode, the
* systemControls
parameter applies to the container instance's
- * kernel parameter as well as that of all containers of any tasks running on that
+ * kernel parameter and that of all containers of any tasks running on that
* container instance.
The namespaced kernel parameter for which to set a value
.
The namespaced kernel parameter to set a value
for.
The value for the namespaced kernel parameter specified in
- * namespace
.
The value for the namespaced kernel parameter that's specified in
+ * namespace
.
The name of another container within the same task definition from which to mount - * volumes.
+ *The name of another container within the same task definition to mount volumes + * from.
*/ sourceContainer?: string; @@ -4601,7 +4686,7 @@ export namespace VolumeFrom { */ export interface ContainerDefinition { /** - *The name of a container. If you are linking multiple containers together in a task + *
The name of a container. If you're linking multiple containers together in a task
* definition, the name
of one container can be entered in the
* links
of another container to connect the containers.
* Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name
in the
@@ -4613,7 +4698,7 @@ export interface ContainerDefinition {
/**
*
The image used to start a container. This string is passed directly to the Docker
- * daemon. Images in the Docker Hub registry are available by default. Other repositories
+ * daemon. By default, images in the Docker Hub registry are available. Other repositories
* are specified with either When a new task starts, the Amazon ECS container agent pulls the latest version of
* the specified image and tag for the container to use. However, subsequent
- * updates to a repository image are not propagated to already running
- * tasks.
* repository-url/image:tag
*
or
@@ -4626,8 +4711,7 @@ export interface ContainerDefinition {
*
Images in Amazon ECR repositories can be specified by either using the full @@ -4674,17 +4758,17 @@ export interface ContainerDefinition { *
Linux containers share unallocated CPU units with other containers on the container * instance with the same ratio as their allocated amount. For example, if you run a * single-container task on a single-core instance type with 512 CPU units specified for - * that container, and that is the only task running on the container instance, that + * that container, and that's the only task running on the container instance, that * container could use the full 1,024 CPU unit share at any given time. However, if you - * launched another copy of the same task on that container instance, each task would be - * guaranteed a minimum of 512 CPU units when needed, and each container could float to - * higher CPU usage if the other container was not using it, but if both tasks were 100% + * launched another copy of the same task on that container instance, each task is + * guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float + * to higher CPU usage if the other container was not using it. If both tasks were 100% * active all of the time, they would be limited to 512 CPU units.
*On Linux container instances, the Docker daemon on the container instance uses the CPU * value to calculate the relative CPU share ratios for running containers. For more * information, see CPU share * constraint in the Docker documentation. The minimum valid CPU share value - * that the Linux kernel allows is 2. However, the CPU parameter is not required, and you + * that the Linux kernel allows is 2. However, the CPU parameter isn't required, and you * can use CPU values below 2 in your container definitions. For CPU values below 2 * (including null), the behavior varies based on your Amazon ECS container agent * version:
@@ -4703,7 +4787,7 @@ export interface ContainerDefinition { *On Windows container instances, the CPU limit is enforced as an absolute limit, or a
- * quota. Windows containers only have access to the specified amount of CPU that is
+ * quota. Windows containers only have access to the specified amount of CPU that's
* described in the task definition. A null or zero CPU value is passed to Docker as
* 0
, which Windows interprets as 1% of one CPU.
   * If you specify both a container-level memory and memoryReservation value, memory
   * must be greater than memoryReservation. If you specify
   * memoryReservation, then that value is subtracted from the available
-   * memory resources for the container instance on which the container is placed. Otherwise,
+   * memory resources for the container instance where the container is placed. Otherwise,
   * the value of memory is used.
-   * The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should
-   * not specify fewer than 4 MiB of memory for your containers.
+   * The Docker daemon reserves a minimum of 4 MiB of memory for a container. Therefore, we
+   * recommend that you specify at least 4 MiB of memory for your containers.
 */
memory?: number;

@@ -4742,7 +4826,7 @@ export interface ContainerDefinition {
   * definition. If you specify both, memory must be greater than
   * memoryReservation. If you specify memoryReservation, then
   * that value is subtracted from the available memory resources for the container instance
-   * on which the container is placed. Otherwise, the value of memory is
+   * where the container is placed. Otherwise, the value of memory is
   * used.
   * For example, if your container normally uses 128 MiB of memory, but occasionally
   * bursts to 256 MiB of memory for short periods of time, you can set a
@@ -4750,8 +4834,8 @@ export interface ContainerDefinition {
   * 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory
   * from the remaining resources on the container instance, but also allow the container to
   * consume more memory resources when needed.
-   * The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should
-   * not specify fewer than 4 MiB of memory for your containers.
+   * The Docker daemon reserves a minimum of 4 MiB of memory for a container. Therefore, we
+   * recommend that you specify at least 4 MiB of memory for your containers.
 */
memoryReservation?: number;
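As a sketch of the soft/hard limit pairing described above (illustrative only; the names and numbers are hypothetical, matching the 128 MiB/300 MiB example in the doc text):

```ts
// A container that normally needs ~128 MiB but can burst toward 300 MiB.
const containerDefinition = {
  name: "bursty-app",
  image: "public.ecr.aws/docker/library/node:16",
  memoryReservation: 128, // soft limit: subtracted from the instance's available memory
  memory: 300,            // hard limit: must be greater than memoryReservation
  essential: true,
};
```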
@@ -4766,10 +4850,10 @@ export interface ContainerDefinition {
   * Create a container section of the Docker Remote API and the
   * --link option to docker run.
-   * This parameter is not supported for Windows containers.
+   * This parameter isn't supported for Windows containers.
   * Containers that are collocated on a single container instance may be able to
   * communicate with each other without requiring links or host port mappings. Network
   * isolation is achieved on the container instance using security groups and VPC
@@ -4781,12 +4865,12 @@ export interface ContainerDefinition {
  /**
   * The list of port mappings for the container. Port mappings allow containers to access
   * ports on the host container instance to send or receive traffic.
-   * For task definitions that use the awsvpc network mode, you should only
-   * specify the containerPort. The hostPort can be left blank or
-   * it must be the same value as the containerPort.
+   * For task definitions that use the awsvpc network mode, only specify the
+   * containerPort. The hostPort can be left blank or it must
+   * be the same value as the containerPort.
   * Port mappings on Windows use the NetNAT gateway address rather than
-   * localhost. There is no loopback for port mappings on Windows, so you
-   * cannot access a container's mapped port from the host itself.
+   * localhost. There's no loopback for port mappings on Windows, so you
+   * can't access a container's mapped port from the host itself.
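A brief sketch of the awsvpc rule above (illustrative only; the ports are hypothetical):

```ts
// With the awsvpc network mode, only containerPort is needed; hostPort may be
// omitted or must equal containerPort.
const portMappings = [
  { containerPort: 8080, protocol: "tcp" },               // hostPort omitted
  { containerPort: 443, hostPort: 443, protocol: "tcp" }, // or identical values
];
```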
   * This parameter maps to PortBindings in the
   * Create a container section of the Docker Remote API and the
   * --publish option to docker run.
@@ -4809,11 +4893,11 @@ export interface ContainerDefinition {
   * If the essential parameter of a container is marked as true,
   * and that container fails or stops for any reason, all other containers that are part of
   * the task are stopped. If the essential parameter of a container is marked
-   * as false, then its failure does not affect the rest of the containers in a
-   * task. If this parameter is omitted, a container is assumed to be essential.
+   * as false, its failure doesn't affect the rest of the containers in a task.
+   * If this parameter is omitted, a container is assumed to be essential.
-   * All tasks must have at least one essential container. If you have an application that
-   * is composed of multiple containers, you should group containers that are used for a
-   * common purpose into components, and separate the different components into multiple task
+   * All tasks must have at least one essential container. If you have an application
+   * that's composed of multiple containers, group containers that are used for a common
+   * purpose into components, and separate the different components into multiple task
   * definitions. For more information, see Application
   * Architecture in the Amazon Elastic Container Service Developer Guide.
-   * The entry point that is passed to the container. This parameter maps to
+   * The entry point that's passed to the container. This parameter maps to
   * Entrypoint in the Create a container section of the
   * Docker Remote API and the --entrypoint option to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint.
-   * Early versions of the Amazon ECS container agent do not properly handle
+   * Early versions of the Amazon ECS container agent don't properly handle
   * entryPoint parameters. If you have problems using
   * entryPoint, update your container agent or enter your commands and
   * arguments as command array items instead.
-   * The command that is passed to the container. This parameter maps to Cmd
-   * in the Create a container section of the Docker Remote API and the
+   * The command that's passed to the container. This parameter maps to Cmd in
+   * the Create a container section of the Docker Remote API and the
   * COMMAND parameter to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd. If there are multiple arguments, each
-   * argument should be a separated string in the array.
+   * argument is a separated string in the array.
   * The environment variables to pass to a container. This parameter maps to
   * Env in the Create a container section of the
   * Docker Remote API and the --env option to docker run.
-   * We do not recommend using plaintext environment variables for sensitive
+   * We don't recommend that you use plaintext environment variables for sensitive
   * information, such as credential data.
   * A list of files containing the environment variables to pass to a container. This
   * parameter maps to the --env-file option to docker run.
   * You can specify up to ten environment files. The file must have a .env
-   * file extension. Each line in an environment file should contain an environment variable
-   * in VARIABLE=VALUE format. Lines beginning with # are treated
-   * as comments and are ignored. For more information on the environment variable file
+   * file extension. Each line in an environment file contains an environment variable in
+   * VARIABLE=VALUE format. Lines beginning with # are treated
+   * as comments and are ignored. For more information about the environment variable file
+   * syntax, see Declare default
   * environment variables in file.
   * If there are environment variables specified using the environment
   * parameter in a container definition, they take precedence over the variables contained
   * within an environment file. If multiple environment files are specified that contain the
-   * same variable, they are processed from the top down. It is recommended to use unique
+   * same variable, they're processed from the top down. We recommend that you use unique
   * variable names. For more information, see Specifying Environment
   * Variables in the Amazon Elastic Container Service Developer Guide.
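A sketch of the precedence rule above (illustrative; the variable name and the S3 object ARN are placeholders):

```ts
// environment entries win over variables from environment files; files that
// define the same variable are processed top-down.
const containerEnv = {
  environment: [{ name: "STAGE", value: "prod" }],
  environmentFiles: [
    { type: "s3", value: "arn:aws:s3:::my-config-bucket/app.env" }, // must use a .env extension
  ],
};
```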
   * This parameter maps to Volumes in the Create a container
   * section of the Docker Remote API and the --volume option to docker run.
   * Windows containers can mount whole directories on the same drive as
-   * $env:ProgramData. Windows containers cannot mount directories on a
-   * different drive, and mount point cannot be across drives.
+   * $env:ProgramData. Windows containers can't mount directories on a
+   * different drive, and mount points can't be across drives.
*/
mountPoints?: MountPoint[];
@@ -4910,20 +4994,19 @@ export interface ContainerDefinition {
* least version 1.26.0 of the container agent to enable container dependencies. However,
* we recommend using the latest container agent version. For information about checking
* your agent version and updating to the latest version, see Updating the Amazon ECS
- * Container Agent in the Amazon Elastic Container Service Developer Guide. If you are
- * using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the
+ * Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using
+ * an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the
   * ecs-init package. If your container instances are launched from version
   * 20190301 or later, then they contain the required versions of the
   * container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
-   * For tasks using the Fargate launch type, the task or service requires the followiwng platforms:
+   * For tasks using the Fargate launch type, the task or service requires
+   * the following platforms:
-   * Linux platform
-   * version 1.3.0 or later.
+   * Linux platform version 1.3.0 or later.
-   * Windows platform
-   * version 1.0.0 or later.
+   * Windows platform version 1.0.0 or later.
   * COMPLETE,
   * SUCCESS, or HEALTHY status. If a startTimeout
-   * value is specified for containerB and it does not reach the desired status within that
-   * time then containerA will give up and not start. This results in the task transitioning
-   * to a STOPPED state.
+   * value is specified for containerB and it doesn't reach the desired status within that
+   * time then containerA gives up and doesn't start. This results in the task transitioning to a
+   * STOPPED state.
   * When the ECS_CONTAINER_START_TIMEOUT container agent configuration
-   * variable is used, it is enforced indendently from this start timeout value.
+   * variable is used, it's enforced independently from this start timeout value.
-   * For tasks using the Fargate launch type, the task or service requires the followiwng platforms:
+   * For tasks using the Fargate launch type, the task or service requires
+   * the following platforms:
-   * Linux platform
-   * version 1.3.0 or later.
+   * Linux platform version 1.3.0 or later.
-   * Windows platform
-   * version 1.0.0 or later.
+   * Windows platform version 1.0.0 or later.
   * For tasks using the EC2 launch type, your container instances require at
@@ -4957,9 +5039,9 @@ export interface ContainerDefinition {
   * timeout value. However, we recommend using the latest container agent version. For
   * information about checking your agent version and updating to the latest version, see
   * Updating the Amazon ECS
-   * Container Agent in the Amazon Elastic Container Service Developer Guide. If you are
-   * using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of
-   * the ecs-init package. If your container instances are launched from version
+   * Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using
+   * an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the
+   * ecs-init package. If your container instances are launched from version
   * 20190301 or later, then they contain the required versions of the
   * container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
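A sketch of the dependency behavior described above (illustrative only; container names, images, and the health check command are hypothetical):

```ts
// containerA waits for containerB to report HEALTHY; if containerB doesn't
// reach that status within startTimeout seconds, the task moves to STOPPED.
const containerDefinitions = [
  {
    name: "containerB",
    image: "public.ecr.aws/docker/library/redis:6",
    essential: true,
    healthCheck: {
      command: ["CMD-SHELL", "redis-cli ping || exit 1"],
      interval: 10,
      retries: 3,
    },
    startTimeout: 120, // seconds containerA is willing to wait
  },
  {
    name: "containerA",
    image: "public.ecr.aws/docker/library/nginx:latest",
    essential: true,
    dependsOn: [{ containerName: "containerB", condition: "HEALTHY" }],
  },
];
```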
   * Time duration (in seconds) to wait before the container is forcefully killed if it
   * doesn't exit normally on its own.
-   * For tasks using the Fargate launch type, the task or service requires the followiwng platforms:
+   * For tasks using the Fargate launch type, the task or service requires
+   * the following platforms:
-   * Linux platform
-   * version 1.3.0 or later.
+   * Linux platform version 1.3.0 or later.
-   * Windows platform
-   * version 1.0.0 or later.
+   * Windows platform version 1.0.0 or later.
-   * The max stop timeout value is 120 seconds and if the
-   * parameter is not specified, the default value of 30 seconds is used.
+   * The max stop timeout value is 120 seconds and if the parameter is not specified, the
+   * default value of 30 seconds is used.
-   * For tasks using the EC2 launch type, if the stopTimeout
-   * parameter is not specified, the value set for the Amazon ECS container agent configuration
-   * variable ECS_CONTAINER_STOP_TIMEOUT is used by default. If neither the
+   * For tasks that use the EC2 launch type, if the stopTimeout
+   * parameter isn't specified, the value set for the Amazon ECS container agent configuration
+   * variable ECS_CONTAINER_STOP_TIMEOUT is used. If neither the
   * stopTimeout parameter or the ECS_CONTAINER_STOP_TIMEOUT
   * agent configuration variable are set, then the default values of 30 seconds for Linux
   * containers and 30 seconds on Windows containers are used. Your container instances
* containers and 30 seconds on Windows containers are used. Your container instances
@@ -4991,8 +5072,8 @@ export interface ContainerDefinition {
* timeout value. However, we recommend using the latest container agent version. For
* information about checking your agent version and updating to the latest version, see
* Updating the Amazon ECS
- * Container Agent in the Amazon Elastic Container Service Developer Guide. If you are
- * using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the
+ * Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using
+ * an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the
   * ecs-init package. If your container instances are launched from version
   * 20190301 or later, then they contain the required versions of the
   * container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
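A one-field sketch of the stop timeout above (illustrative; the name and image are placeholders):

```ts
// Give the container up to the 120-second maximum to exit after SIGTERM
// before it is forcefully killed; the default is 30 seconds when unset.
const containerDefinition = {
  name: "graceful-worker",
  image: "public.ecr.aws/docker/library/python:3.9",
  essential: true,
  stopTimeout: 120, // seconds
};
```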
   * --hostname option to docker run.
-   * The hostname parameter is not supported if you are using the
-   * awsvpc network mode.
+   * The hostname parameter is not supported if you're using the
+   * awsvpc network mode.
   * --user option to docker run.
-   * When running tasks using the host network mode, you should not run
-   * containers using the root user (UID 0). It is considered best practice to use a
-   * non-root user.
+   * When running tasks using the host network mode, don't run containers
+   * using the root user (UID 0). We recommend using a non-root user for better
+   * security.
   * You can specify the user using the following formats. If specifying a UID
   * or GID, you must specify it as a positive integer.
   * This parameter is not supported for Windows containers.
-   * The working directory in which to run commands inside the container. This parameter
-   * maps to WorkingDir in the Create a container section of the
+   * The working directory to run commands inside the container in. This parameter maps to
+   * WorkingDir in the Create a container section of the
   * Docker Remote API and the --workdir option to docker run.
When this parameter is true, networking is disabled within the container. This
* parameter maps to NetworkDisabled
in the Create a container
* section of the Docker Remote API.
This parameter is not supported for Windows containers.
*root
user). This parameter maps to
* Privileged
in the Create a container section of the
* Docker Remote API and the --privileged
option to docker run.
- * This parameter is not supported for Windows containers or tasks run on Fargate.
*--read-only
option to docker
* run.
- * This parameter is not supported for Windows containers.
*A list of DNS servers that are presented to the container. This parameter maps to
* Dns
in the Create a container section of the
* Docker Remote API and the --dns
option to docker run.
This parameter is not supported for Windows containers.
*A list of DNS search domains that are presented to the container. This parameter maps
* to DnsSearch
in the Create a container section of the
* Docker Remote API and the --dns-search
option to docker run.
This parameter is not supported for Windows containers.
*--add-host
option to docker
* run.
* This parameter is not supported for Windows containers or tasks that use the + *
This parameter isn't supported for Windows containers or tasks that use the
* awsvpc
network mode.
A list of strings to provide custom labels for SELinux and AppArmor multi-level - * security systems. This field is not valid for containers in tasks using the + * security systems. This field isn't valid for containers in tasks using the * Fargate launch type.
*With Windows containers, this parameter can be used to reference a credential spec * file when configuring a container for Active Directory authentication. For more @@ -5161,10 +5242,10 @@ export interface ContainerDefinition { dockerSecurityOptions?: string[]; /** - *
When this parameter is true
, this allows you to deploy containerized
- * applications that require stdin
or a tty
to be allocated. This
- * parameter maps to OpenStdin
in the Create a container
- * section of the Docker Remote API and the --interactive
option to docker run.
When this parameter is true
, you can deploy containerized applications
+ * that require stdin
or a tty
to be allocated. This parameter
+ * maps to OpenStdin
in the Create a container section of the
+ * Docker Remote API and the --interactive
option to docker run.
   * A list of ulimits to set in the container. If a ulimit value is specified
-   * in a task definition, it will override the default values set by Docker. This parameter
-   * maps to Ulimits in the Create a container section of the
+   * in a task definition, it overrides the default values set by Docker. This parameter maps
+   * to Ulimits in the Create a container section of the
   * Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed
   * in the Ulimit data type.
Amazon ECS tasks hosted on Fargate use the default
@@ -5198,7 +5279,7 @@ export interface ContainerDefinition {
* is 4096
.
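A sketch of raising the Fargate nofile defaults mentioned above (illustrative values):

```ts
// Raise the open-files limit past the Fargate default soft limit of 1024
// (hard limit 4096) described in the doc text.
const ulimits = [{ name: "nofile", softLimit: 65535, hardLimit: 65535 }];
```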
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
*
This parameter is not supported for Windows containers.
*--log-driver
option to docker
* run. By default, containers use the same logging driver that the Docker
- * daemon uses. However the container may use a different logging driver than the Docker
+ * daemon uses. However the container can use a different logging driver than the Docker
* daemon by specifying a log driver with this parameter in the container definition. To
* use a different logging driver for a container, the log system must be configured
* properly on the container instance (or on a different log server for remote logging
- * options). For more information on the options for different supported log drivers, see
- * Configure
+ * options). For more information about the options for different supported log drivers,
+ * see Configure
* logging drivers in the Docker documentation.
* Amazon ECS currently supports a subset of the logging drivers available to the Docker
@@ -5249,10 +5330,10 @@ export interface ContainerDefinition {
* Sysctls
in the Create a container section of the
* Docker Remote API and the --sysctl
option to docker run.
It is not recommended that you specify network-related systemControls
+ *
   * We don't recommend that you specify network-related systemControls
* parameters for multiple containers in a single task that also uses either the
* awsvpc
or host
network modes. For tasks that use the
- * awsvpc
network mode, the container that is started last determines
+ * awsvpc
network mode, the container that's started last determines
* which systemControls
parameters take effect. For tasks that use the
* host
network mode, it changes the container instance's namespaced
* kernel parameters as well as the containers.
-   * This parameter is only supported for tasks hosted on Fargate using the following platform versions:
+   * This parameter is only supported for tasks hosted on Fargate using
+   * the following platform versions:
-   * Linux platform
-   * version 1.4.0 or later.
+   * Linux platform version 1.4.0 or later.
-   * Windows platform
-   * version 1.0.0 or later.
+   * Windows platform version 1.0.0 or later.
-   * Details on a Elastic Inference accelerator. For more information, see Working with
-   * Amazon Elastic Inference on Amazon ECS in the
-   * Amazon Elastic Container Service Developer Guide.
+   * Details on an Elastic Inference accelerator. For more information, see Working with Amazon Elastic Inference on
+   * Amazon ECS in the Amazon Elastic Container Service Developer Guide.
*/ export interface InferenceAccelerator { /** @@ -5374,7 +5453,7 @@ export enum TaskDefinitionPlacementConstraintType { * information, see Task placement constraints in the * Amazon Elastic Container Service Developer Guide. *Task placement constraints are not supported for tasks run on Fargate.
+ *Task placement constraints aren't supported for tasks run on Fargate.
*The configuration details for the App Mesh proxy.
-   * For tasks using the EC2 launch type, the container instances require at
-   * least version 1.26.0 of the container agent and at least version 1.26.0-1 of the
+   * For tasks that use the EC2 launch type, the container instances require
+   * at least version 1.26.0 of the container agent and at least version 1.26.0-1 of the
   * ecs-init package to enable a proxy configuration. If your container
-   * instances are launched from the Amazon ECS-optimized AMI version 20190301 or
+   * instances are launched from the Amazon ECS optimized AMI version 20190301 or
   * later, then they contain the required versions of the container agent and
   * ecs-init. For more information, see Amazon ECS-optimized Linux AMI
-   * This parameter is specified when you are using Docker volumes. Docker volumes are only
-   * supported when you are using the EC2 launch type. Windows containers only
+   * This parameter is specified when you're using Docker volumes. Docker volumes are only
+   * supported when you're using the EC2 launch type. Windows containers only
   * support the use of the local driver. To use bind mounts, specify a
   * host instead.
-   * If this value is true, the Docker volume is created if it does not
-   * already exist.
+   * If this value is true, the Docker volume is created if it doesn't already
+   * exist.
   * This field is only used if the scope is shared.
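A sketch of the autoprovisioned shared volume described above (illustrative; the volume name is a placeholder):

```ts
// A shared, autoprovisioned Docker volume (EC2 launch type only;
// autoprovision applies only when scope is "shared").
const volume = {
  name: "data",
  dockerVolumeConfiguration: {
    scope: "shared",
    autoprovision: true, // create the volume if it doesn't already exist
    driver: "local",
  },
};
```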
-   * Whether or not to use the Amazon ECS task IAM role defined in a task definition when
+   * Determines whether to use the Amazon ECS task IAM role defined in a task definition when
   * mounting the Amazon EFS file system. If enabled, transit encryption must be enabled in the
-   * This parameter is specified when you are using an Amazon Elastic File System file system for task
+   * This parameter is specified when you're using an Amazon Elastic File System file system for task
   * storage. For more information, see Amazon EFS Volumes in the
   * Amazon Elastic Container Service Developer Guide.
- * and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is
- * used. If this parameter is omitted, the default value of Determines whether to enable encryption for Amazon EFS data in transit between the Amazon ECS
+ * host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization
+ * is used. If this parameter is omitted, the default value of This parameter is specified when you are using Amazon FSx for Windows File Server file system for task
+ * This parameter is specified when you're using Amazon FSx for Windows File Server file system for task
* storage. For more information and the input format, see Amazon FSx for Windows File Server Volumes
* in the Amazon Elastic Container Service Developer Guide. When the If you are using the Fargate launch type, the If you're using the Fargate launch type, the A data volume used in a task definition. For tasks that use the Amazon Elastic File
- * System (Amazon EFS), specify an A data volume that's used in a task definition. For tasks that use the Amazon Elastic
+ * File System (Amazon EFS), specify an This parameter is specified when you are using bind mount host volumes. The contents
- * of the This parameter is specified when you use bind mount host volumes. The contents of the
+ * Windows containers can mount whole directories on the same drive as
- * This parameter is specified when you are using Docker volumes. This parameter is specified when you use Docker volumes. Windows containers only support the use of the Docker volumes are not supported by tasks run on Fargate. Docker volumes aren't supported by tasks run on Fargate. This parameter is specified when you are using an Amazon Elastic File System file system for task
+ * This parameter is specified when you use an Amazon Elastic File System file system for task
* storage. This parameter is specified when you are using Amazon FSx for Windows File Server file system for task
+ * This parameter is specified when you use Amazon FSx for Windows File Server file system for task
* storage. The name of a family that this task definition is registered to. Up to 255 letters
- * (uppercase and lowercase), numbers, hyphens, and underscores are allowed. The name of a family that this task definition is registered to. Up to 255 characters
+ * are allowed. Letters (both uppercase and lowercase letters), numbers, hyphens (-), and
+ * underscores (_) are allowed. A family groups multiple versions of a task definition. Amazon ECS gives the first task
* definition that you registered to a family a revision number of 1. Amazon ECS gives
* sequential revision numbers to each task definition that you add.EFSVolumeConfiguration
. If this parameter is omitted, the default value
* of DISABLED
is used. For more information, see Using
@@ -5643,7 +5722,7 @@ export enum EFSTransitEncryption {
}

/**
-   * DISABLED is used.
-   * For more information, see Encrypting Data in Transit in
+   * DISABLED is
+   * used. For more information, see Encrypting Data in Transit in
   * the Amazon Elastic File System User Guide.
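A sketch tying together the EFS settings above (illustrative; the file system ID is a placeholder):

```ts
// Transit encryption must be ENABLED whenever EFS IAM authorization is used.
const volume = {
  name: "efs-storage",
  efsVolumeConfiguration: {
    fileSystemId: "fs-12345678",
    rootDirectory: "/",
    transitEncryption: "ENABLED",
    authorizationConfig: { iam: "ENABLED" },
  },
};
```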
parameter is used, specify a sourcePath
to
- * declare the path on the host container instance that is presented to the container. If
+ * declare the path on the host container instance that's presented to the container. If
* this parameter is empty, then the Docker daemon has assigned a host path for you. If the
* host
parameter contains a sourcePath
file location, then
* the data volume persists at the specified location on the host container instance until
- * you delete it manually. If the sourcePath
value does not exist on the host
+ * you delete it manually. If the sourcePath
value doesn't exist on the host
* container instance, the Docker daemon creates it. If the location does exist, the
* contents of the source path folder are exported.sourcePath
+ * sourcePath
* parameter is not supported.efsVolumeConfiguration
. For Windows tasks
- * that use Amazon FSx for Windows File Server file system, specify a
+ * efsVolumeConfiguration
. For Windows
+ * tasks that use Amazon FSx for Windows File Server file system, specify a
* fsxWindowsFileServerVolumeConfiguration
. For tasks that use a Docker
* volume, specify a DockerVolumeConfiguration
. For tasks that use a bind
* mount host volume, specify a host
and optional sourcePath
. For
@@ -5807,38 +5886,38 @@ export interface Volume {
name?: string;
/**
- * host
parameter determine whether your bind mount host volume
- * persists on the host container instance and where it is stored. If the host
- * parameter is empty, then the Docker daemon assigns a host path for your data volume.
- * However, the data is not guaranteed to persist after the containers associated with it
- * stop running.host
parameter determine whether your bind mount host volume persists
+ * on the host container instance and where it's stored. If the host
parameter
+ * is empty, then the Docker daemon assigns a host path for your data volume. However, the
+ * data isn't guaranteed to persist after the containers that are associated with it stop
+ * running.$env:ProgramData
. Windows containers cannot mount directories on a
- * different drive, and mount point cannot be across drives. For example, you can mount
+ * $env:ProgramData
. Windows containers can't mount directories on a
+ * different drive, and mount point can't be across drives. For example, you can mount
* C:\my\path:C:\my\path
and D:\:D:\
, but not
* D:\my\path:C:\my\path
or D:\:C:\my\path
.local
driver. To use bind
* mounts, specify the host
parameter instead.
   * IAM roles for tasks on Windows require that the -EnableTaskIAMRole option
   * is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some
-   * configuration code in order to take advantage of the feature. For more information, see
-   * Windows IAM roles
+   * configuration code to use the feature. For more information, see Windows IAM roles
   * for tasks in the Amazon Elastic Container Service Developer Guide.
   * The revision of the task in a particular family. The revision is a version number of a
   * task definition in a family. When you register a task definition for the first time, the
   * revision is 1. Each time that you register a new revision of a task
-   * definition in the same family, the revision value always increases by one, even if you
-   * have deregistered previous revisions in this family.
+   * definition in the same family, the revision value always increases by one. This is even
+   * if you deregistered previous revisions in this family.
The list of data volume definitions for the task. For more information, see Using data volumes in tasks in the * Amazon Elastic Container Service Developer Guide.
-   * The host and sourcePath parameters are not supported for
+   * The host and sourcePath parameters aren't supported for
   * tasks run on Fargate.
-   * The container instance attributes required by your task. When an Amazon EC2 instance is
-   * registered to your cluster, the Amazon ECS container agent assigns some standard attributes
-   * to the instance. You can apply custom attributes, specified as key-value pairs using the
-   * Amazon ECS console or the PutAttributes API. These attributes are used when
-   * considering task placement for tasks hosted on Amazon EC2 instances. For more information,
-   * see Attributes in the Amazon Elastic Container Service Developer Guide.
+   * The container instance attributes required by your task. When an Amazon EC2 instance is
+   * registered to your cluster, the Amazon ECS container agent assigns some standard attributes
+   * to the instance. You can apply custom attributes. These are specified as key-value pairs
+   * using the Amazon ECS console or the PutAttributes API. These attributes are
+   * used when determining task placement for tasks hosted on Amazon EC2 instances. For more
+   * information, see Attributes in the Amazon Elastic Container Service Developer Guide.
-   * This parameter is not supported for tasks run on Fargate.
+   * This parameter isn't supported for tasks run on Fargate.
   * An array of placement constraint objects to use for tasks.
-   * This parameter is not supported for tasks run on Fargate.
+   * This parameter isn't supported for tasks run on Fargate.
-   * The operating system that your task definitions are running on. A platform family is specified only for tasks using the Fargate launch type.
-   * When you specify a task in a service, this value must match the runtimePlatform value of the service.
+   * The operating system that your task definitions are running on. A platform family is
+   * specified only for tasks using the Fargate launch type.
+   * When you specify a task in a service, this value must match the
+   * runtimePlatform value of the service.
-   * The number of cpu units used by the task. If you are using the EC2 launch
-   * type, this field is optional and any value can be used. If you are using the Fargate
-   * launch type, this field is required and you must use one of the following values, which
-   * determines your range of valid values for the memory parameter:
+   * The number of cpu units used by the task. If you use the EC2 launch type,
+   * this field is optional. Any value can be used. If you use the Fargate launch type, this
+   * field is required. You must use one of the following values. The value that you choose
+   * determines your range of valid values for the memory parameter.
+   * The CPU units cannot be less than 1 vCPU when you use Windows containers on
+   * Fargate.
   * 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)
   * The amount (in MiB) of memory used by the task.
-   * If your tasks will be run on Amazon EC2 instances, you must specify either a task-level
-   * memory value or a container-level memory value. This field is optional and any value can
-   * be used. If a task-level memory value is specified then the container-level memory value
-   * is optional. For more information regarding container-level memory and memory
-   * reservation, see ContainerDefinition.
-   * If your tasks will be run on Fargate, this field is required and you must use one of
-   * the following values, which determines your range of valid values for the
-   * cpu parameter:
+   * If your tasks run on Amazon EC2 instances, you must specify either a task-level memory
+   * value or a container-level memory value. This field is optional and any value can be
+   * used. If a task-level memory value is specified, the container-level memory value is
+   * optional. For more information regarding container-level memory and memory reservation,
+   * see ContainerDefinition.
+   * If your tasks run on Fargate, this field is required. You must use one of the
+   * following values. The value you choose determines your range of valid values for the
+   * cpu parameter.
   * 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 (.25 vCPU)
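A sketch of one valid Fargate cpu/memory pairing from the table above (illustrative; the family name is a placeholder):

```ts
// 256 CPU units (.25 vCPU) permits memory values of 512, 1024, or 2048 MiB.
const taskDefinition = {
  family: "fargate-sized",
  requiresCompatibilities: ["FARGATE"],
  networkMode: "awsvpc",
  cpu: "256",    // string form; converted to an integer of CPU units
  memory: "512", // must fall in the range allowed by the chosen cpu value
};
```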
-   * The Elastic Inference accelerator associated with the task.
+   * The Elastic Inference accelerator that's associated with the task.
 */
inferenceAccelerators?: InferenceAccelerator[];

@@ -6118,19 +6201,19 @@ export interface TaskDefinition {
   * The configuration details for the App Mesh proxy.
   * Your Amazon ECS container instances require at least version 1.26.0 of the container agent
   * and at least version 1.26.0-1 of the ecs-init package to enable a proxy
-   * configuration. If your container instances are launched from the Amazon ECS-optimized AMI
-   * version 20190301 or later, then they contain the required versions of the
+   * configuration. If your container instances are launched from the Amazon ECS optimized AMI
+   * version 20190301 or later, they contain the required versions of the
   * container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
The Unix timestamp for when the task definition was registered.
+ *The Unix timestamp for the time when the task definition was registered.
*/ registeredAt?: Date; /** - *The Unix timestamp for when the task definition was deregistered.
+ *The Unix timestamp for the time when the task definition was deregistered.
*/ deregisteredAt?: Date; @@ -6184,7 +6267,7 @@ export interface DescribeCapacityProvidersRequest { /** *Specifies whether or not you want to see the resource tags for the capacity provider.
* If TAGS
is specified, the tags are included in the response. If this field
- * is omitted, tags are not included in the response.
Whether to include additional information about the clusters in the response. If this - * field is omitted, this information isn't included.
+ *Determines whether to include additional information about the clusters in the + * response. If this field is omitted, this information isn't included.
*If ATTACHMENTS
is specified, the attachments for the container instances
* or tasks within the cluster are included.
If SETTINGS
is specified, the settings for the cluster are
@@ -6348,6 +6431,7 @@ export namespace DescribeClustersResponse {
}
export enum ContainerInstanceField {
+ CONTAINER_INSTANCE_HEALTH = "CONTAINER_INSTANCE_HEALTH",
TAGS = "TAGS",
}
@@ -6367,8 +6451,10 @@ export interface DescribeContainerInstancesRequest {
/**
*
   * Specifies whether you want to see the resource tags for the container instance. If
-   * TAGS is specified, the tags are included in the response. If this field
-   * is omitted, tags are not included in the response.
+   * TAGS is specified, the tags are included in the response. If
+   * CONTAINER_INSTANCE_HEALTH is specified, the container instance health
+   * is included in the response. If this field is omitted, tags and container instance
+   * health status aren't included in the response.
*/
include?: (ContainerInstanceField | string)[];
}
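A sketch of requesting the newly added health field above (illustrative; the cluster name and instance ARN are placeholders):

```ts
import { DescribeContainerInstancesCommand, ECSClient } from "@aws-sdk/client-ecs";

const client = new ECSClient({});

// Ask for both tags and the container instance health status.
const { containerInstances } = await client.send(
  new DescribeContainerInstancesCommand({
    cluster: "default",
    containerInstances: ["arn:aws:ecs:us-east-1:111122223333:container-instance/example"],
    include: ["TAGS", "CONTAINER_INSTANCE_HEALTH"],
  })
);
```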
@@ -6422,9 +6508,9 @@ export interface DescribeServicesRequest {
services: string[] | undefined;
/**
- * Specifies whether you want to see the resource tags for the service. If + *
   * Determines whether you want to see the resource tags for the service. If
   * TAGS is specified, the tags are included in the response. If this field
-   * is omitted, tags are not included in the response.
+   * is omitted, tags aren't included in the response.
Specifies whether to see the resource tags for the task definition. If + *
Determines whether to see the resource tags for the task definition. If
* TAGS
is specified, the tags are included in the response. If this field
- * is omitted, tags are not included in the response.
-   * The metadata that is applied to the task definition to help you categorize and
-   * organize them. Each tag consists of a key and an optional value, both of which you
-   * define.
-   * The following basic restrictions apply to tags:
+   * The metadata that's applied to the task definition to help you categorize and organize
+   * them. Each tag consists of a key and an optional value. You define both.
+   * The following basic restrictions apply to tags:
*Maximum number of tags per resource - 50
@@ -6562,8 +6647,8 @@ export interface DescribeTasksRequest { /** *Specifies whether you want to see the resource tags for the task. If TAGS
- * is specified, the tags are included in the response. If this field is omitted, tags are
- * not included in the response.
The Unix timestamp for when the managed agent was last started.
+ *The Unix timestamp for the time when the managed agent was last started.
*/ lastStartedAt?: Date; @@ -6640,12 +6725,12 @@ export interface NetworkBinding { bindIP?: string; /** - *The port number on the container that is used with the network binding.
+ *The port number on the container that's used with the network binding.
*/ containerPort?: number; /** - *The port number on the host that is used with the network binding.
+ *The port number on the host that's used with the network binding.
*/ hostPort?: number; @@ -6695,7 +6780,7 @@ export namespace NetworkInterface { } /** - *A Docker container that is part of a task.
+ *A Docker container that's part of a task.
*/ export interface Container { /** @@ -6759,7 +6844,7 @@ export interface Container { networkInterfaces?: NetworkInterface[]; /** - *The health status of the container. If health checks are not configured for this + *
The health status of the container. If health checks aren't configured for this
* container in its task definition, then it reports the health status as
* UNKNOWN
.
The number of CPU units set for the container. The value will be 0
if no
- * value was specified in the container definition when the task definition was
+ *
The number of CPU units set for the container. The value is 0
if no value
+ * was specified in the container definition when the task definition was
* registered.
The overrides that should be sent to a container. An empty container override can be
- * passed in. An example of an empty container override would be
- * {"containerOverrides": [ ] }
. If a non-empty container override is
- * specified, the name
parameter must be included.
The overrides that are sent to a container. An empty container override can be passed
+ * in. An example of an empty container override is {"containerOverrides": [ ]
+ * }
. If a non-empty container override is specified, the name
+ * parameter must be included.
   * Details on an Elastic Inference accelerator task override. This parameter is used to
   * override the Elastic Inference accelerator specified in the task definition. For more
-   * information, see Working with Amazon Elastic Inference on Amazon ECS in the
-   * Amazon Elastic Container Service Developer Guide.
+   * information, see Working with Amazon
+   * Elastic Inference on Amazon ECS in the
+   * Amazon Elastic Container Service Developer Guide.
+ *The overrides that are associated with a task.
*/ export interface TaskOverride { /** - *One or more container overrides sent to a task.
+ *One or more container overrides that are sent to a task.
*/ containerOverrides?: ContainerOverride[]; @@ -6918,8 +7004,8 @@ export interface TaskOverride { inferenceAcceleratorOverrides?: InferenceAcceleratorOverride[]; /** - *The Amazon Resource Name (ARN) of the task execution IAM role override for the task. For more information,
- * see Amazon ECS task
+ * The Amazon Resource Name (ARN) of the task execution IAM role override for the task. For more
+ * information, see Amazon ECS task
* execution IAM role in the Amazon Elastic Container Service Developer Guide. The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in
- * this task are granted the permissions that are specified in this role. For more
+ * The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers
+ * in this task are granted the permissions that are specified in this role. For more
* information, see IAM Role for Tasks
* in the Amazon Elastic Container Service Developer Guide. The ephemeral storage setting override for the task. This parameter is only supported for tasks hosted on Fargate using the following platform versions: This parameter is only supported for tasks hosted on Fargate that
+ * use the following platform versions: Linux platform
- * version Linux platform version Windows platform
- * version Windows platform version The Elastic Network Adapter associated with the task if the task uses the
+ * The Elastic Network Adapter that's associated with the task if the task uses the
* The availability zone of the task. The Availability Zone for the task. The capacity provider associated with the task. The capacity provider that's associated with the task. The Unix timestamp for when the task last went into The Unix timestamp for the time when the task last went into The containers associated with the task. The containers that's associated with the task. The number of CPU units used by the task as expressed in a task definition. It can be
- * expressed as an integer using CPU units, for example If you are using the EC2 launch type, this field is optional. Supported
- * values are between If you are using the Fargate launch type, this field is required and you
- * must use one of the following values, which determines your range of supported values
- * for the
*
* 1.4.0
or later.1.4.0
or later.1.0.0
or later.1.0.0
or later.awsvpc
network mode.CONNECTED
+ * CONNECTED
* status.1024
. It can also be
- * expressed as a string using vCPUs, for example 1 vCPU
or 1
- * vcpu
. String values are converted to an integer indicating the CPU units when
- * the task definition is registered.128
CPU units (0.125
vCPUs) and
- * 10240
CPU units (10
vCPUs).memory
parameter:
+ * expressed as an integer using CPU units (for example,
1024
). It can also be
+ * expressed as a string using vCPUs (for example, 1 vCPU
or 1
+ * vcpu
). String values are converted to an integer that indicates the CPU units
+ * when the task definition is registered.
If you use the EC2 launch type, this field is optional. Supported values
+ * are between 128
CPU units (0.125
vCPUs) and 10240
+ * CPU units (10
vCPUs).
If you use the Fargate launch type, this field is required. You must use
+ * one of the following values. These values determine the range of supported values for
+ * the memory
parameter:
The CPU units cannot be less than 1 vCPU when you use Windows containers on + * Fargate.
+ *256 (.25 vCPU) - Available memory
values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)
The Unix timestamp for when the task was created (the task entered the
- * PENDING
state).
The Unix timestamp for the time when the task was created. More specifically, it's for
+ * the time when the task entered the PENDING
state.
Whether or not execute command functionality is enabled for this task. If
- * true
, this enables execute command functionality on all containers in
+ *
Determines whether execute command functionality is enabled for this task. If
+ * true
, execute command functionality is enabled on all the containers in
* the task.
The Unix timestamp for when the task execution stopped.
+ *The Unix timestamp for the time when the task execution stopped.
*/ executionStoppedAt?: Date; /** - *The name of the task group associated with the task.
+ *The name of the task group that's associated with the task.
*/ group?: string; /** - *The health status for the task, which is determined by the health of the essential + *
The health status for the task. It's determined by the health of the essential
* containers in the task. If all essential containers in the task are reporting as
- * HEALTHY
, then the task status also reports as HEALTHY
. If
- * any essential containers in the task are reporting as UNHEALTHY
or
- * UNKNOWN
, then the task status also reports as UNHEALTHY
or
- * UNKNOWN
, accordingly.
HEALTHY
, the task status also reports as HEALTHY
. If any
+ * essential containers in the task are reporting as UNHEALTHY
or
+ * UNKNOWN
, the task status also reports as UNHEALTHY
or
+ * UNKNOWN
.
* The Amazon ECS container agent does not monitor or report on Docker health checks that - * are embedded in a container image (such as those specified in a parent image or from - * the image's Dockerfile) and not specified in the container definition. Health check - * parameters that are specified in a container definition override any Docker health - * checks that exist in the container image.
+ *The Amazon ECS container agent doesn't monitor or report on Docker health checks that + * are embedded in a container image and not specified in the container definition. For + * example, this includes those specified in a parent image or from the image's + * Dockerfile. Health check parameters that are specified in a container definition + * override any Docker health checks that are found in the container image.
*The Elastic Inference accelerator associated with the task.
+ *The Elastic Inference accelerator that's associated with the task.
*/ inferenceAccelerators?: InferenceAccelerator[]; /** - *The last known status of the task. For more information, see Task
+ * The last known status for the task. For more information, see Task
* Lifecycle. The infrastructure on which your task is running. For more information, see Amazon ECS
+ * The infrastructure where your task runs on. For more information, see Amazon ECS
* launch types in the Amazon Elastic Container Service Developer Guide. The amount of memory (in MiB) used by the task as expressed in a task definition. It
- * can be expressed as an integer using MiB, for example The amount of memory (in MiB) that the task uses as expressed in a task definition. It
+ * can be expressed as an integer using MiB (for example, If you are using the EC2 launch type, this field is optional. If you are using the Fargate launch type, this field is required and you
- * must use one of the following values, which determines your range of supported values
- * for the If you use the EC2 launch type, this field is optional. If you use the Fargate launch type, this field is required. You must use
+ * one of the following values. The value that you choose determines the range of supported
+ * values for the 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available The platform version on which your task is running. A platform version is only
- * specified for tasks using the Fargate launch type. If one is not
- * specified, the The platform version where your task runs on. A platform version is only specified for
+ * tasks that use the Fargate launch type. If you didn't specify one, the
+ * The operating system that your tasks are running on. A platform family is specified only for tasks using the Fargate launch type. All tasks that run as part of this service must use the same The operating system that your tasks are running on. A platform family is specified
+ * only for tasks that use the Fargate launch type. All tasks that run as part of this service must use the same
+ * The Unix timestamp for when the container image pull began. The Unix timestamp for the time when the container image pull began. The Unix timestamp for when the container image pull completed. The Unix timestamp for the time when the container image pull completed. The Unix timestamp for when the task started (the task transitioned from the
- * The Unix timestamp for the time when the task started. More specifically, it's for the
+ * time when the task transitioned from the The tag specified when a task is started. If the task is started by an Amazon ECS service,
- * then the The tag specified when a task is started. If an Amazon ECS service started the task, the
+ * The stop code indicating why a task was stopped. The The stop code indicating why a task was stopped. The The Unix timestamp for when the task was stopped (the task transitioned from the
- * The Unix timestamp for the time when the task was stopped. More specifically, it's for
+ * the time when the task transitioned from the The Unix timestamp for when the task stops (transitions from the The Unix timestamp for the time when the task stops. More specifically, it's for the
+ * time when the task transitions from the The metadata that you apply to the task to help you categorize and organize them. Each
- * tag consists of a key and an optional value, both of which you define. The metadata that you apply to the task to help you categorize and organize the task.
+ * Each tag consists of a key and an optional value. You define both the key and
+ * value. The following basic restrictions apply to tags: The version counter for the task. Every time a task experiences a change that triggers
- * a CloudWatch event, the version counter is incremented. If you are replicating your Amazon ECS task
- * state with CloudWatch Events, you can compare the version of a task reported by the Amazon ECS API
+ * The version counter for the task. Every time a task experiences a change that starts a
+ * CloudWatch event, the version counter is incremented. If you replicate your Amazon ECS task state
+ * with CloudWatch Events, you can compare the version of a task reported by the Amazon ECS API
* actions with the version reported in CloudWatch Events for the task (inside the
* Specifies whether to see the resource tags for the task set. If The short name or full Amazon Resource Name (ARN) of the cluster to which the container instance
- * belongs. The short name or full Amazon Resource Name (ARN) of the cluster that the container instance belongs
+ * to. The details of the execute command session. The details for the execute command session. A URL back to managed agent on the container that the SSM Session Manager client uses
- * to send commands and receive output from the container. A URL
+ * back
+ * to managed agent on the container that the SSM Session Manager client
+ * uses to send commands and receive output from the container. An encrypted token value containing session and caller information. Used to
+ * An encrypted token value containing session and caller information. It's used to
* authenticate the connection to the container. Whether or not the execute command session is running in interactive mode. Amazon ECS only
- * supports initiating interactive sessions, so you must specify Determines whether the execute command session is running in interactive mode. Amazon ECS
+ * only supports initiating interactive sessions, so you must specify The target container is not properly configured with the execute command agent or the
+ * The target container isn't properly configured with the execute command agent or the
* container is no longer active or running. The value of the account settings with which to filter results. You must also specify
- * an account setting name to use this parameter. The value of the account settings to filter results with. You must also specify an
+ * account setting name to use this parameter. Specifies whether to return the effective settings. If Determines whether to return the effective settings. If The This token should be treated as an opaque identifier that is only used to
* retrieve the next items in a list and not for other programmatic purposes.1024
. It can also be
- * expressed as a string using GB, for example 1GB
or 1 GB
.
- * String values are converted to an integer indicating the MiB when the task definition is
+ * 1024
). If it's
+ * expressed as a string using GB (for example, 1GB
or 1 GB
),
+ * it's converted to an integer indicating the MiB when the task definition is
* registered.cpu
parameter:
+ *
cpu
parameter.
*
cpu
values: 256 (.25 vCPU)LATEST
platform version is used by default. For more
- * information, see Fargate Platform
- * Versions in the Amazon Elastic Container Service Developer Guide.LATEST
platform version is used. For more information, see Fargate Platform Versions in the
+ * Amazon Elastic Container Service Developer Guide.platformFamily
value as the service, for example, LINUX.
.platformFamily
value as the service (for example,
+ * LINUX.
).PENDING
state to the RUNNING
state).PENDING
state to the
+ * RUNNING
state.startedBy
parameter contains the deployment ID of the service that
- * starts it.startedBy
parameter contains the deployment ID of that service.stoppedReason
may
+ * stoppedReason
might
* contain additional details.RUNNING
state to the STOPPED
state).RUNNING
state to the
+ * STOPPED
state.RUNNING
- * state to STOPPED
).RUNNING
state to
+ * STOPPED
.
*
detail
object) to verify that the version in your event stream is
* current.TAGS
is
- * specified, the tags are included in the response. If this field is omitted, tags are not
+ * specified, the tags are included in the response. If this field is omitted, tags aren't
* included in the response.true
for this
- * value.true
for
+ * this value.true
, the account
+ * true
, the account
* settings for the root user or the default setting for the principalArn
are
* returned. If false
, the account settings for the principalArn
- * are returned if they are set. Otherwise, no account settings are returned.nextToken
value returned from a ListAccountSettings
* request indicating that more results are available to fulfill the request and further
- * calls will be needed. If maxResults
was provided, it is possible the number
+ * calls will be needed. If maxResults
was provided, it's possible the number
* of results to be fewer than maxResults
.ListAccountSettings
* request with the returned nextToken
value. This value can be between
* 1 and 10. If this
- * parameter is not used, then ListAccountSettings
returns up to
+ * parameter isn't used, then ListAccountSettings
returns up to
* 10 results and a nextToken
value
* if applicable.
The type of the target with which to list attributes.
+ *The type of the target to list attributes with.
*/ targetType: TargetType | string | undefined; /** - *The name of the attribute with which to filter the results.
+ *The name of the attribute to filter the results with.
*/ attributeName?: string; /** - *The value of the attribute with which to filter results. You must also specify an - * attribute name to use this parameter.
+ *The value of the attribute to filter results with. You must also specify an attribute + * name to use this parameter.
*/ attributeValue?: string; /** *The nextToken
value returned from a ListAttributes
request
- * indicating that more results are available to fulfill the request and further calls will
- * be needed. If maxResults
was provided, it is possible the number of results
- * to be fewer than maxResults
.
maxResults
was provided, it's possible the number of results to
+ * be fewer than maxResults
.
+ * This token should be treated as an opaque identifier that is only used to * retrieve the next items in a list and not for other programmatic purposes.
- * The maximum number of cluster results returned by ListAttributes in
+ * The maximum number of cluster results that ListAttributes returned in
 * paginated output. When this parameter is used, ListAttributes only returns
 * maxResults results in a single page along with a nextToken
 * response element. The remaining results of the initial request can be seen by sending
 * another ListAttributes request with the returned nextToken
 * value. This value can be between 1 and 100. If this
- * parameter is not used, then ListAttributes returns up to
+ * parameter isn't used, then ListAttributes returns up to
 * 100 results and a nextToken value if applicable.
 * The nextToken value returned from a ListClusters request
- * indicating that more results are available to fulfill the request and further calls will
- * be needed. If maxResults was provided, it is possible the number of results
- * to be fewer than maxResults.
+ * indicating that more results are available to fulfill the request and further calls will
+ * be needed. If maxResults was provided, it's possible the number of results to
+ * be fewer than maxResults.
+ * This token should be treated as an opaque identifier that is only used to
+ * retrieve the next items in a list and not for other programmatic purposes.
- * The maximum number of cluster results returned by ListClusters in
+ * The maximum number of cluster results that ListClusters returned in
 * paginated output. When this parameter is used, ListClusters only returns
 * maxResults results in a single page along with a nextToken
 * response element. The remaining results of the initial request can be seen by sending
 * another ListClusters request with the returned nextToken
 * value. This value can be between 1 and 100. If this
- * parameter is not used, then ListClusters returns up to
- * 100 results and a nextToken value if applicable.
+ * parameter isn't used, then ListClusters returns up to 100
+ * results and a nextToken value if applicable.
*/
maxResults?: number;
}
@@ -7755,7 +7849,7 @@ export namespace ListClustersRequest {
export interface ListClustersResponse {
 /**
- * The list of full Amazon Resource Name (ARN) entries for each cluster associated with your
+ * The list of full Amazon Resource Name (ARN) entries for each cluster that's associated with your
  * account.
  */
 clusterArns?: string[];

@@ -7804,9 +7898,9 @@ export interface ListContainerInstancesRequest {
 /**
  * The nextToken value returned from a ListContainerInstances
  * request indicating that more results are available to fulfill the request and further
- * calls will be needed. If maxResults was provided, it is possible the number
- * of results to be fewer than maxResults.
+ * calls will be needed. If maxResults was provided, it's possible the number of
+ * results to be fewer than maxResults.
+ * This token should be treated as an opaque identifier that is only used to
+ * retrieve the next items in a list and not for other programmatic purposes.
*The maximum number of container instance results returned by
- * ListContainerInstances
in paginated output. When this parameter is
- * used, ListContainerInstances
only returns maxResults
results
- * in a single page along with a nextToken
response element. The remaining
- * results of the initial request can be seen by sending another
- * ListContainerInstances
request with the returned nextToken
- * value. This value can be between 1 and 100. If this
- * parameter is not used, then ListContainerInstances
returns up to
- * 100 results and a nextToken
value if applicable.
The maximum number of container instance results that
+ * ListContainerInstances
returned in paginated output. When this
+ * parameter is used, ListContainerInstances
only returns
+ * maxResults
results in a single page along with a nextToken
+ * response element. The remaining results of the initial request can be seen by sending
+ * another ListContainerInstances
request with the returned
+ * nextToken
value. This value can be between 1 and
+ * 100. If this parameter isn't used, then
+ * ListContainerInstances
returns up to 100 results and
+ * a nextToken
value if applicable.
Filters the container instances by status. For example, if you specify the
* DRAINING
status, the results include only container instances that have
* been set to DRAINING
using UpdateContainerInstancesState.
- * If you do not specify this parameter, the default is to include container instances set
+ * If you don't specify this parameter, the default is to include container instances set
* to all states other than INACTIVE
.
 * ... If maxResults was provided, it is possible the number of results
 * to be fewer than maxResults.
- * This token should be treated as an opaque identifier that is only used to
- * retrieve the next items in a list and not for other programmatic purposes.
- * The maximum number of service results returned by ListServices in
+ * The maximum number of service results that ListServices returned in
* paginated output. When this parameter is used, ListServices
only returns
* maxResults
results in a single page along with a nextToken
* response element. The remaining results of the initial request can be seen by sending
* another ListServices
request with the returned nextToken
* value. This value can be between 1 and 100. If
- * this parameter is not used, then ListServices
returns up to
+ * this parameter isn't used, then ListServices
returns up to
* 10 results and a nextToken
value if
* applicable.
- * The list of full ARN entries for each service associated with the specified
+ * The list of full ARN entries for each service that's associated with the specified
 * cluster.
 */
serviceArns?: string[];

@@ -7952,7 +8047,7 @@ export namespace ListServicesResponse {
export interface ListTagsForResourceRequest {
 /**
- * The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the
+ * The Amazon Resource Name (ARN) that identifies the resource to list the tags for. Currently, the
  * supported resources are Amazon ECS tasks, services, task definitions, clusters, and container
  * instances.
  */

@@ -7992,7 +8087,7 @@ export enum TaskDefinitionFamilyStatus {
export interface ListTaskDefinitionFamiliesRequest {
 /**
- * The familyPrefix is a string that is used to filter the results of
+ * The familyPrefix is a string that's used to filter the results of
 * ListTaskDefinitionFamilies. If you specify a familyPrefix,
 * only task definition family names that begin with the familyPrefix string
 * are returned.
- * The task definition family status with which to filter the
- * ListTaskDefinitionFamilies results. By default, both
+ * The task definition family status to filter the
+ * ListTaskDefinitionFamilies results with. By default, both
* ACTIVE
and INACTIVE
task definition families are listed.
* If this parameter is set to ACTIVE
, only task definition families that have
* an ACTIVE
task definition revision are returned. If this parameter is set
@@ -8018,7 +8113,7 @@ export interface ListTaskDefinitionFamiliesRequest {
* available to fulfill the request and further calls will be needed. If
* maxResults
was provided, it is possible the number of results to be
* fewer than maxResults
.
 * This token should be treated as an opaque identifier that is only used to
 * retrieve the next items in a list and not for other programmatic purposes.
- * The maximum number of task definition family results returned by
- * ListTaskDefinitionFamilies in paginated output. When this parameter is
- * used, ListTaskDefinitions only returns maxResults results in a
- * single page along with a nextToken response element. The remaining results
- * of the initial request can be seen by sending another
+ * The maximum number of task definition family results that
+ * ListTaskDefinitionFamilies returned in paginated output. When this
+ * parameter is used, ListTaskDefinitions only returns maxResults
+ * results in a single page along with a nextToken response element. The
+ * remaining results of the initial request can be seen by sending another
 * ListTaskDefinitionFamilies request with the returned
 * nextToken value. This value can be between 1 and
- * 100. If this parameter is not used, then
+ * 100. If this parameter isn't used, then
 * ListTaskDefinitionFamilies returns up to 100 results
 * and a nextToken value if applicable.
- * The full family name with which to filter the ListTaskDefinitions
- * results. Specifying a familyPrefix limits the listed task definitions to
- * task definition revisions that belong to that family.
+ * The full family name to filter the ListTaskDefinitions results with.
+ * Specifying a familyPrefix limits the listed task definitions to task
+ * definition revisions that belong to that family.
- * The task definition status with which to filter the ListTaskDefinitions
- * results. By default, only ACTIVE task definitions are listed. By setting
- * this parameter to INACTIVE, you can view task definitions that are
+ * The task definition status to filter the ListTaskDefinitions results
+ * with. By default, only ACTIVE task definitions are listed. By setting this
+ * parameter to INACTIVE, you can view task definitions that are
* INACTIVE
as long as an active task or service still references them. If
* you paginate the resulting output, be sure to keep the status
value
* constant in each subsequent request.
- * The order in which to sort the results. Valid values are ASC and
- * DESC. By default (ASC), task definitions are listed
+ * The order to sort the results in. Valid values are ASC and
+ * DESC. By default, (ASC) task definitions are listed
 * lexicographically by family name and in ascending numerical order by revision so that
 * the newest task definitions in a family are listed last. Setting this parameter to
- * DESC reverses the sort order on family name and revision so that the
- * newest task definitions in a family are listed first.
+ * DESC reverses the sort order on family name and revision. This is so
+ * that the newest task definitions in a family are listed first.
*/
sort?: SortOrder | string;
@@ -8113,7 +8208,7 @@ export interface ListTaskDefinitionsRequest {
* request indicating that more results are available to fulfill the request and further
* calls will be needed. If maxResults
was provided, it is possible the number
* of results to be fewer than maxResults
.
- * This token should be treated as an opaque identifier that is only used to
- * retrieve the next items in a list and not for other programmatic purposes.
- * The maximum number of task definition results returned by
- * ListTaskDefinitions in paginated output. When this parameter is used,
+ * The maximum number of task definition results that ListTaskDefinitions
+ * returned in paginated output. When this parameter is used,
* ListTaskDefinitions
only returns maxResults
results in a
* single page along with a nextToken
response element. The remaining results
* of the initial request can be seen by sending another ListTaskDefinitions
* request with the returned nextToken
value. This value can be between
- * 1 and 100. If this parameter is not used, then
+ * 1 and 100. If this parameter isn't used, then
* ListTaskDefinitions
returns up to 100 results and a
* nextToken
value if applicable.
The nextToken
value returned from a ListTasks
request
* indicating that more results are available to fulfill the request and further calls will
- * be needed. If maxResults
was provided, it is possible the number of results
+ * be needed. If maxResults
was provided, it's possible the number of results
* to be fewer than maxResults
.
 * This token should be treated as an opaque identifier that is only used to
 * retrieve the next items in a list and not for other programmatic purposes.
- * The maximum number of task results returned by ListTasks in paginated
+ * The maximum number of task results that ListTasks returned in paginated
* output. When this parameter is used, ListTasks
only returns
* maxResults
results in a single page along with a nextToken
* response element. The remaining results of the initial request can be seen by sending
* another ListTasks
request with the returned nextToken
value.
- * This value can be between 1 and 100. If this parameter is
- * not used, then ListTasks returns up to 100 results and a
- * nextToken value if applicable.
+ * This value can be between 1 and 100. If this parameter
+ * isn't used, then ListTasks returns up to 100 results and
+ * a nextToken value if applicable.
*/
maxResults?: number;
/**
- * The startedBy value with which to filter the task results. Specifying a
+ * The startedBy value to filter the task results with. Specifying a
 * startedBy value limits the results to tasks that were started with that
 * value.
 * The task desired status to use when filtering the ListTasks results.
 * Specifying a desiredStatus of STOPPED limits the results to
 * tasks that Amazon ECS has set the desired status to STOPPED. This can be useful
- * for debugging tasks that are not starting properly or have died or finished. The default
+ * for debugging tasks that aren't starting properly or have died or finished. The default
 * status filter is RUNNING, which shows tasks that Amazon ECS has set the desired
 * status to RUNNING.
 * Although you can filter results based on a desired status of PENDING,
- * this does not return any results. Amazon ECS never sets the desired status of a task to
+ * this doesn't return any results. Amazon ECS never sets the desired status of a task to
 * that value (only a task's lastStatus may have a value of
 * PENDING).
/**
- * You can apply up to 10 custom attributes per resource. You can view the attributes of
- * a resource with ListAttributes. You can remove existing attributes on
- * a resource with DeleteAttributes.
+ * You can apply up to 10 custom attributes for each resource. You can view the
+ * attributes of a resource with ListAttributes. You can remove existing
+ * attributes on a resource with DeleteAttributes.
 */
export interface AttributeLimitExceededException extends __SmithyException, $MetadataBearer {
  name: "AttributeLimitExceededException";

@@ -8424,7 +8519,7 @@ export interface PutAttributesRequest {
 /**
  * The attributes to apply to your resource. You can specify up to 10 custom attributes
- * per resource. You can specify up to 10 attributes in a single call.
+ * for each resource. You can specify up to 10 attributes in a single call.
  */
 attributes: Attribute[] | undefined;
}

@@ -8457,7 +8552,7 @@ export namespace PutAttributesResponse {
export interface PutClusterCapacityProvidersRequest {
 /**
  * The short name or full Amazon Resource Name (ARN) of the cluster to modify the capacity provider
- * settings for. If you do not specify a cluster, the default cluster is assumed.
+ * settings for. If you don't specify a cluster, the default cluster is assumed.
  */
 cluster: string | undefined;

@@ -8519,7 +8614,7 @@ export namespace PutClusterCapacityProvidersResponse {
}

/**
- * The specified resource is in-use and cannot be removed.
+ *The specified resource is in-use and can't be removed.
*/ export interface ResourceInUseException extends __SmithyException, $MetadataBearer { name: "ResourceInUseException"; @@ -8546,14 +8641,14 @@ export enum PlatformDeviceType { */ export interface PlatformDevice { /** - *The ID for the GPU(s) on the container instance. The available GPU IDs can also be + *
The ID for the GPUs on the container instance. The available GPU IDs can also be
* obtained on the container instance in the
* /var/lib/ecs/gpu/nvidia_gpu_info.json
file.
The type of device that is available on the container instance. The only supported + *
The type of device that's available on the container instance. The only supported
* value is GPU
.
- * The short name or full Amazon Resource Name (ARN) of the cluster with which to register your container
- * instance. If you do not specify a cluster, the default cluster is assumed.
+ * The short name or full Amazon Resource Name (ARN) of the cluster to register your container instance
+ * with. If you do not specify a cluster, the default cluster is assumed.
*/ cluster?: string; @@ -8597,8 +8692,8 @@ export interface RegisterContainerInstanceRequest { totalResources?: Resource[]; /** - *The version information for the Amazon ECS container agent and Docker daemon running on the - * container instance.
+ *The version information for the Amazon ECS container agent and Docker daemon that runs on + * the container instance.
*/ versionInfo?: VersionInfo; @@ -8620,9 +8715,8 @@ export interface RegisterContainerInstanceRequest { /** *The metadata that you apply to the container instance to help you categorize and - * organize them. Each tag consists of a key and an optional value, both of which you - * define.
- *The following basic restrictions apply to tags:
+ * organize them. Each tag consists of a key and an optional value. You define both. + *The following basic restrictions apply to tags:
*Maximum number of tags per resource - 50
@@ -8684,9 +8778,9 @@ export namespace RegisterContainerInstanceResponse { export interface RegisterTaskDefinitionRequest { /** - *You must specify a family
for a task definition, which allows you to
- * track multiple versions of the same task definition. The family
is used as
- * a name for your task definition. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.
You must specify a family
for a task definition. You can use it track
+ * multiple versions of the same task definition. The family
is used as a name
+ * for your task definition. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.
A list of volume definitions in JSON format that containers in your task may + *
A list of volume definitions in JSON format that containers in your task might * use.
*/ volumes?: Volume[]; /** *An array of placement constraint objects to use for the task. You can specify a - * maximum of 10 constraints per task (this limit includes constraints in the task - * definition and those specified at runtime).
+ * maximum of 10 constraints for each task. This limit includes constraints in the task + * definition and those specified at runtime. */ placementConstraints?: TaskDefinitionPlacementConstraint[]; /** - *The task launch type that Amazon ECS should validate the task definition against. A client + *
The task launch type that Amazon ECS validates the task definition against. A client * exception is returned if the task definition doesn't validate against the * compatibilities specified. If no value is specified, the parameter is omitted from the * response.
@@ -8767,21 +8861,23 @@ export interface RegisterTaskDefinitionRequest { /** *The number of CPU units used by the task. It can be expressed as an integer using CPU
- * units, for example 1024
, or as a string using vCPUs, for example 1
- * vCPU
or 1 vcpu
, in a task definition. String values are
+ * units (for example, 1024
) or as a string using vCPUs (for example, 1
+ * vCPU
or 1 vcpu
) in a task definition. String values are
* converted to an integer indicating the CPU units when the task definition is
* registered.
Task-level CPU and memory parameters are ignored for Windows containers. We * recommend specifying container-level resources for Windows containers.
*If you are using the EC2 launch type, this field is optional. Supported + *
If you're using the EC2 launch type, this field is optional. Supported
* values are between 128
CPU units (0.125
vCPUs) and
* 10240
CPU units (10
vCPUs).
If you are using the Fargate launch type, this field is required and you + *
If you're using the Fargate launch type, this field is required and you
* must use one of the following values, which determines your range of supported values
* for the memory
parameter:
The CPU units cannot be less than 1 vCPU when you use Windows containers on + * Fargate.
+ *256 (.25 vCPU) - Available memory
values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)
The amount of memory (in MiB) used by the task. It can be expressed as an integer
- * using MiB, for example 1024
, or as a string using GB, for example
- * 1GB
or 1 GB
, in a task definition. String values are
+ * using MiB (for example, 1024
) or as a string using GB (for example,
+ * 1GB
or 1 GB
) in a task definition. String values are
* converted to an integer indicating the MiB when the task definition is
* registered.
If using the EC2 launch type, this field is optional.
*If using the Fargate launch type, this field is required and you must
- * use one of the following values, which determines your range of supported values for the
- * cpu
parameter:
cpu
parameter.
+ * The CPU units cannot be less than 1 vCPU when you use Windows containers on + * Fargate.
+ *512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu
values: 256 (.25 vCPU)
The metadata that you apply to the task definition to help you categorize and organize - * them. Each tag consists of a key and an optional value, both of which you define.
+ * them. Each tag consists of a key and an optional value. You define both of them. *The following basic restrictions apply to tags:
*This parameter is only supported for tasks hosted on Fargate using the following platform versions:
+ *This parameter is only supported for tasks hosted on Fargate using + * the following platform versions:
*Linux platform
- * version 1.4.0
or later.
Linux platform version 1.4.0
or later.
Windows platform
- * version 1.0.0
or later.
Windows platform version 1.0.0
or later.
The operating system that your tasks definitions run on. A platform family is specified only for tasks using the Fargate launch type.
- *When you specify a task definition in a service, this value must match the runtimePlatform
value of the service.
The operating system that your tasks definitions run on. A platform family is + * specified only for tasks using the Fargate launch type.
+ *When you specify a task definition in a service, this value must match the
+ * runtimePlatform
value of the service.
Your Amazon Web Services account has been blocked. For more information, contact
+ * Your Amazon Web Services account was blocked. For more information, contact
* Amazon Web Services Support.capacityProviderStrategy
or
* launchType
is specified, the
* defaultCapacityProviderStrategy
for the cluster is used.
When you use cluster auto scaling, you must specify capacityProviderStrategy
and not launchType
.
When you use cluster auto scaling, you must specify
+ * capacityProviderStrategy
and not launchType
.
A capacity provider strategy may contain a maximum of 6 capacity providers.
*/ capacityProviderStrategy?: CapacityProviderStrategyItem[]; /** - *The short name or full Amazon Resource Name (ARN) of the cluster on which to run your task. + *
The short name or full Amazon Resource Name (ARN) of the cluster to run your task on. * If you do not specify a cluster, the default cluster is assumed.
*/ cluster?: string; /** *The number of instantiations of the specified task to place on your cluster. You can - * specify up to 10 tasks per call.
+ * specify up to 10 tasks for each call. */ count?: number; @@ -9054,9 +9154,9 @@ export interface RunTaskRequest { enableECSManagedTags?: boolean; /** - *Whether or not to enable the execute command functionality for the containers in this
- * task. If true
, this enables execute command functionality on all containers
- * in the task.
Determines whether to enable the execute command functionality for the containers in
+ * this task. If true
, this enables execute command functionality on all
+ * containers in the task.
The infrastructure on which to run your standalone task. For more information, see - * Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.
+ *The infrastructure to run your standalone task on. For more information, see Amazon ECS + * launch types in the Amazon Elastic Container Service Developer Guide.
*The FARGATE
launch type runs your tasks on Fargate On-Demand
* infrastructure.
The EC2
launch type runs your tasks on Amazon EC2 instances registered to your
* cluster.
The EXTERNAL
launch type runs your tasks on your on-premise server or
+ *
The EXTERNAL
launch type runs your tasks on your on-premises server or
* virtual machine (VM) capacity registered to your cluster.
A task can use either a launch type or a capacity provider strategy. If a
* launchType
is specified, the capacityProviderStrategy
* parameter must be omitted.
- * When you use cluster auto scaling, you must specify capacityProviderStrategy
- * and not launchType.
+ * When you use cluster auto scaling, you must specify
+ * capacityProviderStrategy and not launchType.
The network configuration for the task. This parameter is required for task
* definitions that use the awsvpc
network mode to receive their own elastic
- * network interface, and it is not supported for other network modes. For more
- * information, see Task networking
+ * network interface, and it isn't supported for other network modes. For more information,
+ * see Task networking
* in the Amazon Elastic Container Service Developer Guide.
A list of container overrides in JSON format that specify the name of a container in
* the specified task definition and the overrides it should receive. You can override the
- * default command for a container (that is specified in the task definition or Docker
+ * default command for a container (that's specified in the task definition or Docker
* image) with a command
override. You can also override existing environment
* variables (that are specified in the task definition or Docker image) on a container or
* add new environment variables to it with an environment
override.
An array of placement constraint objects to use for the task. You can specify up to 10 - * constraints per task (including constraints in the task definition and those specified - * at runtime).
+ * constraints for each task (including constraints in the task definition and those + * specified at runtime). */ placementConstraints?: PlacementConstraint[]; /** *The placement strategy objects to use for the task. You can specify a maximum of 5 - * strategy rules per task.
+ * strategy rules for each task. */ placementStrategy?: PlacementStrategy[]; /** - *The platform version the task should use. A platform version is only specified for
- * tasks hosted on Fargate. If one is not specified, the LATEST
- * platform version is used by default. For more information, see Fargate platform versions in the
- * Amazon Elastic Container Service Developer Guide.
The platform version the task uses. A platform version is only specified for tasks
+ * hosted on Fargate. If one isn't specified, the LATEST
+ * platform version is used. For more information, see Fargate platform
+ * versions in the Amazon Elastic Container Service Developer Guide.
Specifies whether to propagate the tags from the task definition to the task. If no - * value is specified, the tags are not propagated. Tags can only be propagated to the task + * value is specified, the tags aren't propagated. Tags can only be propagated to the task * during task creation. To add tags to a task after task creation, use the TagResource API action.
*An error will be received if you specify the SERVICE
option when
@@ -9152,7 +9253,7 @@ export interface RunTaskRequest {
* job to your task with the startedBy
parameter. You can then identify which
* tasks belong to that job by filtering the results of a ListTasks call
* with the startedBy
value. Up to 36 letters (uppercase and lowercase),
- * numbers, hyphens, and underscores are allowed.
If a task is started by an Amazon ECS service, then the startedBy
parameter
* contains the deployment ID of the service that starts it.
The family
and revision
(family:revision
) or
- * full ARN of the task definition to run. If a revision
is not specified,
+ * full ARN of the task definition to run. If a revision
isn't specified,
* the latest ACTIVE
revision is used.
- * The full ARN value must match the value that you specified ias the Resource
- * of the IAM principal's permissions policy. For example, if the Resource is
+ * The full ARN value must match the value that you specified as the
+ * Resource of the IAM principal's permissions policy. For example, if the
+ * Resource is
+ * arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:*, the
 * taskDefinition ARN value must be
 * arn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName.
The short name or full Amazon Resource Name (ARN) of the cluster on which to start your task. + *
+ * The short name or full Amazon Resource Name (ARN) of the cluster to start your task in.
 * If you do not specify a cluster, the default cluster is assumed.
*/ cluster?: string; /** - *The container instance IDs or full ARN entries for the container instances on which - * you would like to place your task. You can specify up to 10 container instances.
+ *The container instance IDs or full ARN entries for the container instances where you + * would like to place your task. You can specify up to 10 container instances.
*/ containerInstances: string[] | undefined; @@ -9280,8 +9382,8 @@ export interface StartTaskRequest { /** *A list of container overrides in JSON format that specify the name of a container in
- * the specified task definition and the overrides it should receive. You can override the
- * default command for a container (that is specified in the task definition or Docker
+ * the specified task definition and the overrides it receives. You can override the
+ * default command for a container (that's specified in the task definition or Docker
* image) with a command
override. You can also override existing environment
* variables (that are specified in the task definition or Docker image) on a container or
* add new environment variables to it with an environment
override.
Specifies whether to propagate the tags from the task definition or the service to the - * task. If no value is specified, the tags are not propagated.
+ * task. If no value is specified, the tags aren't propagated. */ propagateTags?: PropagateTags | string; @@ -9309,8 +9411,8 @@ export interface StartTaskRequest { * job to your task with thestartedBy
parameter. You can then identify which
* tasks belong to that job by filtering the results of a ListTasks call
* with the startedBy
value. Up to 36 letters (uppercase and lowercase),
- * numbers, hyphens, and underscores are allowed.
- * If a task is started by an Amazon ECS service, then the startedBy
parameter
+ * numbers, hyphens (-), and underscores (_) are allowed.
If a task is started by an Amazon ECS service, the startedBy
parameter
* contains the deployment ID of the service that starts it.
The family
and revision
(family:revision
) or
- * full ARN of the task definition to start. If a revision
is not specified,
+ * full ARN of the task definition to start. If a revision
isn't specified,
* the latest ACTIVE
revision is used.
An optional message specified when a task is stopped. For example, if you are using a + *
+ * An optional message specified when a task is stopped. For example, if you're using a
 * custom scheduler, you can use this parameter to specify the reason for stopping the task
 * here, and the message appears in subsequent DescribeTasks API
 * operations on this task. Up to 255 characters are allowed in this message.
@@ -9526,7 +9628,7 @@ export interface SubmitContainerStateChangeRequest { status?: string; /** - *The exit code returned for the state change request.
+ *The exit code that's returned for the state change request.
*/ exitCode?: number; @@ -9567,7 +9669,7 @@ export namespace SubmitContainerStateChangeResponse { } /** - *An object representing a change in state for a container.
+ *An object that represents a change in state for a container.
*/ export interface ContainerStateChange { /** @@ -9592,7 +9694,7 @@ export interface ContainerStateChange { exitCode?: number; /** - *Any network bindings associated with the container.
+ *Any network bindings that are associated with the container.
*/ networkBindings?: NetworkBinding[]; @@ -9621,7 +9723,7 @@ export namespace ContainerStateChange { */ export interface ManagedAgentStateChange { /** - *The name of the container associated with the managed agent.
+ *The name of the container that's associated with the managed agent.
*/ containerName: string | undefined; @@ -9672,7 +9774,7 @@ export interface SubmitTaskStateChangeRequest { reason?: string; /** - *Any containers associated with the state change request.
+ *Any containers that's associated with the state change request.
*/ containers?: ContainerStateChange[]; @@ -9682,22 +9784,22 @@ export interface SubmitTaskStateChangeRequest { attachments?: AttachmentStateChange[]; /** - *The details for the managed agent associated with the task.
+ *The details for the managed agent that's associated with the task.
*/ managedAgents?: ManagedAgentStateChange[]; /** - *The Unix timestamp for when the container image pull began.
+ *The Unix timestamp for the time when the container image pull started.
*/ pullStartedAt?: Date; /** - *The Unix timestamp for when the container image pull completed.
+ *The Unix timestamp for the time when the container image pull completed.
*/ pullStoppedAt?: Date; /** - *The Unix timestamp for when the task execution stopped.
+ *The Unix timestamp for the time when the task execution stopped.
*/ executionStoppedAt?: Date; } @@ -9728,7 +9830,7 @@ export namespace SubmitTaskStateChangeResponse { } /** - *The specified resource could not be found.
+ *The specified resource wasn't found.
*/ export interface ResourceNotFoundException extends __SmithyException, $MetadataBearer { name: "ResourceNotFoundException"; @@ -9747,8 +9849,8 @@ export namespace ResourceNotFoundException { export interface TagResourceRequest { /** - *The Amazon Resource Name (ARN) of the resource to which to add tags. Currently, the supported resources - * are Amazon ECS capacity providers, tasks, services, task definitions, clusters, and container + *
The Amazon Resource Name (ARN) of the resource to add tags to. Currently, the supported resources are + * Amazon ECS capacity providers, tasks, services, task definitions, clusters, and container * instances.
*/ resourceArn: string | undefined; @@ -9812,9 +9914,9 @@ export namespace TagResourceResponse { export interface UntagResourceRequest { /** - *The Amazon Resource Name (ARN) of the resource from which to delete tags. Currently, the supported - * resources are Amazon ECS capacity providers, tasks, services, task definitions, clusters, and - * container instances.
+ *The Amazon Resource Name (ARN) of the resource to delete tags from. Currently, the supported resources + * are Amazon ECS capacity providers, tasks, services, task definitions, clusters, and container + * instances.
*/ resourceArn: string | undefined; @@ -9859,14 +9961,14 @@ export interface AutoScalingGroupProviderUpdate { * protection. *When using managed termination protection, managed scaling must also be used - * otherwise managed termination protection will not work.
+ * otherwise managed termination protection doesn't work. *When managed termination protection is enabled, Amazon ECS prevents the Amazon EC2 instances in * an Auto Scaling group that contain tasks from being terminated during a scale-in action. * The Auto Scaling group and each instance in the Auto Scaling group must have instance - * protection from scale-in actions enabled as well. For more information, see Instance Protection in the Auto Scaling User Guide.
- *When managed termination protection is disabled, your Amazon EC2 instances are not - * protected from termination when the Auto Scaling group scales in.
+ * protection from scale-in actions enabled. For more information, see Instance Protection in the Auto Scaling User Guide. + *When managed termination protection is disabled, your Amazon EC2 instances aren't protected + * from termination when the Auto Scaling group scales in.
*/ managedTerminationProtection?: ManagedTerminationProtection | string; } @@ -9887,7 +9989,7 @@ export interface UpdateCapacityProviderRequest { name: string | undefined; /** - *An object representing the parameters to update for the Auto Scaling group capacity + *
+ * An object that represents the parameters to update for the Auto Scaling group capacity
 * provider.
*/ autoScalingGroupProvider: AutoScalingGroupProviderUpdate | undefined; @@ -9968,7 +10070,7 @@ export interface UpdateClusterSettingsRequest { /** *The setting to use by default for a cluster. This parameter is used to enable CloudWatch
- * Container Insights for a cluster. If this value is specified, it will override the
+ * Container Insights for a cluster. If this value is specified, it overrides the
* containerInsights
value set with PutAccountSetting or
* PutAccountSettingDefault.
Amazon ECS is unable to determine the current version of the Amazon ECS container agent on the - * container instance and does not have enough information to proceed with an update. This - * could be because the agent running on the container instance is an older or custom - * version that does not use our version information.
+ *Amazon ECS can't determine the current version of the Amazon ECS container agent on the + * container instance and doesn't have enough information to proceed with an update. This + * could be because the agent running on the container instance is a previous or custom + * version that doesn't use our version information.
*/ export interface MissingVersionException extends __SmithyException, $MetadataBearer { name: "MissingVersionException"; @@ -10022,9 +10124,9 @@ export namespace MissingVersionException { } /** - *There is no update available for this Amazon ECS container agent. This could be because the - * agent is already running the latest version, or it is so old that there is no update - * path to the current version.
+ *There's no update available for this Amazon ECS container agent. This might be because the + * agent is already running the latest version or because it's so old that there's no + * update path to the current version.
*/ export interface NoUpdateAvailableException extends __SmithyException, $MetadataBearer { name: "NoUpdateAvailableException"; @@ -10049,8 +10151,8 @@ export interface UpdateContainerAgentRequest { cluster?: string; /** - *The container instance ID or full ARN entries for the container instance on which - * you would like to update the Amazon ECS container agent.
+ *The container instance ID or full ARN entries for the container instance where you + * would like to update the Amazon ECS container agent.
*/ containerInstance: string | undefined; } @@ -10066,7 +10168,7 @@ export namespace UpdateContainerAgentRequest { export interface UpdateContainerAgentResponse { /** - *The container instance for which the container agent was updated.
+ *The container instance that the container agent was updated for.
*/ containerInstance?: ContainerInstance; } @@ -10093,13 +10195,12 @@ export interface UpdateContainerInstancesStateRequest { containerInstances: string[] | undefined; /** - *The container instance state with which to update the container instance. The only
- * valid values for this action are ACTIVE
and DRAINING
. A
- * container instance can only be updated to DRAINING
status once it has
- * reached an ACTIVE
state. If a container instance is in
- * REGISTERING
, DEREGISTERING
, or
- * REGISTRATION_FAILED
state you can describe the container instance but
- * will be unable to update the container instance state.
The container instance state to update the container instance with. The only valid
+ * values for this action are ACTIVE
and DRAINING
. A container
+ * instance can only be updated to DRAINING
status once it has reached an
+ * ACTIVE
state. If a container instance is in REGISTERING
,
+ * DEREGISTERING
, or REGISTRATION_FAILED
state you can
+ * describe the container instance but can't update the container instance state.
The short name or full Amazon Resource Name (ARN) of the cluster that your service is running on. + *
The short name or full Amazon Resource Name (ARN) of the cluster that your service runs on. * If you do not specify a cluster, the default cluster is assumed.
*/ cluster?: string; @@ -10163,11 +10264,11 @@ export interface UpdateServiceRequest { /** *The capacity provider strategy to update the service to use.
- *If the service is using the default capacity provider strategy for the cluster, the + *
+ * If the service uses the default capacity provider strategy for the cluster, the
+ * service can be updated to use one or more capacity providers as opposed to the default
+ * capacity provider strategy. However, when a service is using a capacity provider
- * strategy that is not the default capacity provider strategy, the service cannot be
- * updated to use the cluster's default capacity provider strategy.
+ * strategy that's not the default capacity provider strategy, the service can't be updated + * to use the cluster's default capacity provider strategy. *A capacity provider strategy consists of one or more capacity providers along with the
* base
and weight
to assign to them. A capacity provider
* must be associated with the cluster to be used in a capacity provider strategy. The
@@ -10203,8 +10304,8 @@ export interface UpdateServiceRequest {
* unchanged. If this value is specified, it will override any existing placement
* constraints defined for the service. To remove all existing placement constraints,
* specify an empty array.
You can specify a maximum of 10 constraints per task (this limit includes constraints - * in the task definition and those specified at runtime).
+ *You can specify a maximum of 10 constraints for each task. This limit includes + * constraints in the task definition and those specified at runtime.
*/ placementConstraints?: PlacementConstraint[]; @@ -10213,37 +10314,37 @@ export interface UpdateServiceRequest { * specified, the existing placement strategy for the service will remain unchanged. If * this value is specified, it will override the existing placement strategy defined for * the service. To remove an existing placement strategy, specify an empty object. - *You can specify a maximum of five strategy rules per service.
+ *You can specify a maximum of five strategy rules for each service.
*/ placementStrategy?: PlacementStrategy[]; /** - *The platform version on which your tasks in the service are running. A platform
- * version is only specified for tasks using the Fargate launch type. If a
- * platform version is not specified, the The platform version that your tasks in the service run on. A platform version is only
+ * specified for tasks using the Fargate launch type. If a platform version
+ * is not specified, the Whether to force a new deployment of the service. Deployments are not forced by
- * default. You can use this option to trigger a new deployment with no service definition
- * changes. For example, you can update a service's tasks to use a newer Docker image with
- * the same image/tag combination ( Determines whether to force a new deployment of the service. By default, deployments
+ * aren't forced. You can use this option to start a new deployment with no service
+ * definition changes. For example, you can update a service's tasks to use a newer Docker
+ * image with the same image/tag combination ( The period of time, in seconds, that the Amazon ECS service scheduler should ignore
- * unhealthy Elastic Load Balancing target health checks after a task has first started. This is only valid
- * if your service is configured to use a load balancer. If your service's tasks take a
- * while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace
- * period of up to 2,147,483,647 seconds. During that time, the Amazon ECS service
- * scheduler ignores the Elastic Load Balancing health check status. This grace period can prevent the ECS
- * service scheduler from marking tasks as unhealthy and stopping them before they have
- * time to come up. The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy
+ * Elastic Load Balancing target health checks after a task has first started. This is only valid if your
+ * service is configured to use a load balancer. If your service's tasks take a while to
+ * start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of
+ * up to 2,147,483,647 seconds. During that time, the Amazon ECS service scheduler ignores
+ * the Elastic Load Balancing health check status. This grace period can prevent the ECS service scheduler
+ * from marking tasks as unhealthy and stopping them before they have time to come
+ * up. The short name or full Amazon Resource Name (ARN) of the cluster that hosts the service that the task
- * set exists in.LATEST
platform version is used by
- * default. For more information, see Fargate Platform
+ * LATEST
platform version is used. For more
+ * information, see Fargate Platform
* Versions in the Amazon Elastic Container Service Developer Guide.my_image:latest
) or to roll Fargate tasks
- * onto a newer platform version.my_image:latest
) or to roll
+ * Fargate tasks onto a newer platform version.
The short name or full Amazon Resource Name (ARN) of the service that the task set exists in.
+ *The short name or full Amazon Resource Name (ARN) of the service that the task set is found in.
 */
 service: string | undefined;

diff --git a/clients/client-ecs/src/protocols/Aws_json1_1.ts b/clients/client-ecs/src/protocols/Aws_json1_1.ts
index e85a62006ed9e..efcf899959245 100644
--- a/clients/client-ecs/src/protocols/Aws_json1_1.ts
+++ b/clients/client-ecs/src/protocols/Aws_json1_1.ts
@@ -177,6 +177,7 @@ import {
   ContainerDependency,
   ContainerInstance,
   ContainerInstanceField,
+  ContainerInstanceHealthStatus,
   ContainerOverride,
   ContainerStateChange,
   CreateCapacityProviderRequest,
@@ -243,6 +244,7 @@ import {
   HostVolumeProperties,
   InferenceAccelerator,
   InferenceAcceleratorOverride,
+  InstanceHealthCheckResult,
   InvalidParameterException,
   KernelCapabilities,
   KeyValuePair,
@@ -8344,6 +8346,10 @@ const deserializeAws_json1_1ContainerInstance = (output: any, context: __SerdeCo
     capacityProviderName: __expectString(output.capacityProviderName),
     containerInstanceArn: __expectString(output.containerInstanceArn),
     ec2InstanceId: __expectString(output.ec2InstanceId),
+    healthStatus:
+      output.healthStatus !== undefined && output.healthStatus !== null
+        ? deserializeAws_json1_1ContainerInstanceHealthStatus(output.healthStatus, context)
+        : undefined,
     pendingTasksCount: __expectInt32(output.pendingTasksCount),
     registeredAt:
       output.registeredAt !== undefined && output.registeredAt !== null
@@ -8370,6 +8376,19 @@ const deserializeAws_json1_1ContainerInstance = (output: any, context: __SerdeCo
   } as any;
 };

+const deserializeAws_json1_1ContainerInstanceHealthStatus = (
+  output: any,
+  context: __SerdeContext
+): ContainerInstanceHealthStatus => {
+  return {
+    details:
+      output.details !== undefined && output.details !== null
+        ? deserializeAws_json1_1InstanceHealthCheckResultList(output.details, context)
+        : undefined,
+    overallStatus: __expectString(output.overallStatus),
+  } as any;
+};
+
 const deserializeAws_json1_1ContainerInstances = (output: any, context: __SerdeContext): ContainerInstance[] => {
   return (output || [])
     .filter((e: any) => e != null)
@@ -9067,6 +9086,38 @@ const deserializeAws_json1_1InferenceAccelerators = (output: any, context: __Ser
   });
 };

+const deserializeAws_json1_1InstanceHealthCheckResult = (
+  output: any,
+  context: __SerdeContext
+): InstanceHealthCheckResult => {
+  return {
+    lastStatusChange:
+      output.lastStatusChange !== undefined && output.lastStatusChange !== null
+        ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.lastStatusChange)))
+        : undefined,
+    lastUpdated:
+      output.lastUpdated !== undefined && output.lastUpdated !== null
+        ?
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.lastUpdated))) + : undefined, + status: __expectString(output.status), + type: __expectString(output.type), + } as any; +}; + +const deserializeAws_json1_1InstanceHealthCheckResultList = ( + output: any, + context: __SerdeContext +): InstanceHealthCheckResult[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1InstanceHealthCheckResult(entry, context); + }); +}; + const deserializeAws_json1_1InvalidParameterException = ( output: any, context: __SerdeContext diff --git a/clients/client-efs/src/endpoints.ts b/clients/client-efs/src/endpoints.ts index d6e5156e559c3..4c8069baa2b64 100644 --- a/clients/client-efs/src/endpoints.ts +++ b/clients/client-efs/src/endpoints.ts @@ -412,6 +412,10 @@ const partitionHash: PartitionHash = { hostname: "elasticfilesystem.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "elasticfilesystem-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -422,6 +426,10 @@ const partitionHash: PartitionHash = { hostname: "elasticfilesystem.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "elasticfilesystem-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-eks/src/endpoints.ts b/clients/client-eks/src/endpoints.ts index 4daaedc8eb717..e5e54c82b3792 100644 --- a/clients/client-eks/src/endpoints.ts +++ b/clients/client-eks/src/endpoints.ts @@ -147,6 +147,10 @@ const partitionHash: PartitionHash = { hostname: "eks.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "eks-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -157,6 +161,10 @@ const partitionHash: PartitionHash = { hostname: "eks.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "eks-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-elastic-beanstalk/src/endpoints.ts b/clients/client-elastic-beanstalk/src/endpoints.ts index 693ea07524a3c..8509fd869001a 100644 --- a/clients/client-elastic-beanstalk/src/endpoints.ts +++ b/clients/client-elastic-beanstalk/src/endpoints.ts @@ -149,6 +149,10 @@ const partitionHash: PartitionHash = { hostname: "elasticbeanstalk.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "elasticbeanstalk-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -159,6 +163,10 @@ const partitionHash: PartitionHash = { hostname: "elasticbeanstalk.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "elasticbeanstalk-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-elastic-inference/src/endpoints.ts b/clients/client-elastic-inference/src/endpoints.ts index 6c155a9ea6194..caf4e49ca4547 100644 --- a/clients/client-elastic-inference/src/endpoints.ts +++ b/clients/client-elastic-inference/src/endpoints.ts @@ -127,6 +127,10 @@ const partitionHash: PartitionHash = { hostname: "api.elastic-inference.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "api.elastic-inference-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -137,6 +141,10 @@ const partitionHash: PartitionHash = { hostname: "api.elastic-inference.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "api.elastic-inference-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-elastic-load-balancing-v2/src/endpoints.ts 
b/clients/client-elastic-load-balancing-v2/src/endpoints.ts index 363c91b11d93e..772060e2a304f 100644 --- a/clients/client-elastic-load-balancing-v2/src/endpoints.ts +++ b/clients/client-elastic-load-balancing-v2/src/endpoints.ts @@ -155,6 +155,10 @@ const partitionHash: PartitionHash = { hostname: "elasticloadbalancing.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "elasticloadbalancing-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -165,6 +169,10 @@ const partitionHash: PartitionHash = { hostname: "elasticloadbalancing.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "elasticloadbalancing-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-elastic-load-balancing/src/endpoints.ts b/clients/client-elastic-load-balancing/src/endpoints.ts index 363c91b11d93e..772060e2a304f 100644 --- a/clients/client-elastic-load-balancing/src/endpoints.ts +++ b/clients/client-elastic-load-balancing/src/endpoints.ts @@ -155,6 +155,10 @@ const partitionHash: PartitionHash = { hostname: "elasticloadbalancing.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "elasticloadbalancing-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -165,6 +169,10 @@ const partitionHash: PartitionHash = { hostname: "elasticloadbalancing.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "elasticloadbalancing-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-elastic-transcoder/src/endpoints.ts b/clients/client-elastic-transcoder/src/endpoints.ts index d429ef0d3a841..fcbe5d1e580dd 100644 --- a/clients/client-elastic-transcoder/src/endpoints.ts +++ b/clients/client-elastic-transcoder/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "elastictranscoder.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "elastictranscoder-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "elastictranscoder.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "elastictranscoder-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-elasticache/src/endpoints.ts b/clients/client-elasticache/src/endpoints.ts index 1d33150fd24b8..3b32cfa650c25 100644 --- a/clients/client-elasticache/src/endpoints.ts +++ b/clients/client-elasticache/src/endpoints.ts @@ -144,6 +144,10 @@ const partitionHash: PartitionHash = { hostname: "elasticache.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "elasticache-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -154,6 +158,10 @@ const partitionHash: PartitionHash = { hostname: "elasticache.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "elasticache-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-elasticsearch-service/src/endpoints.ts b/clients/client-elasticsearch-service/src/endpoints.ts index 97f80484fbb89..c1f5bfbc7c09b 100644 --- a/clients/client-elasticsearch-service/src/endpoints.ts +++ b/clients/client-elasticsearch-service/src/endpoints.ts @@ -156,6 +156,10 @@ const partitionHash: PartitionHash = { hostname: "es.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "es-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -166,6 +170,10 @@ const partitionHash: PartitionHash = { hostname: "es.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "es-fips.{region}.sc2s.sgov.gov", + 
tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-emr-containers/src/endpoints.ts b/clients/client-emr-containers/src/endpoints.ts index 037b282d32453..f048d3d3156fa 100644 --- a/clients/client-emr-containers/src/endpoints.ts +++ b/clients/client-emr-containers/src/endpoints.ts @@ -144,6 +144,10 @@ const partitionHash: PartitionHash = { hostname: "emr-containers.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "emr-containers-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -154,6 +158,10 @@ const partitionHash: PartitionHash = { hostname: "emr-containers.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "emr-containers-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-emr/src/endpoints.ts b/clients/client-emr/src/endpoints.ts index a19b63f04993f..aec5a47861491 100644 --- a/clients/client-emr/src/endpoints.ts +++ b/clients/client-emr/src/endpoints.ts @@ -168,6 +168,10 @@ const partitionHash: PartitionHash = { hostname: "elasticmapreduce.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "elasticmapreduce-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -178,6 +182,10 @@ const partitionHash: PartitionHash = { hostname: "elasticmapreduce.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "elasticmapreduce-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-eventbridge/src/endpoints.ts b/clients/client-eventbridge/src/endpoints.ts index deb0ab8df3b95..6e6b75d81970c 100644 --- a/clients/client-eventbridge/src/endpoints.ts +++ b/clients/client-eventbridge/src/endpoints.ts @@ -149,6 +149,10 @@ const partitionHash: PartitionHash = { hostname: "events.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "events-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -159,6 +163,10 @@ const partitionHash: PartitionHash = { hostname: "events.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "events-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-finspace-data/src/endpoints.ts b/clients/client-finspace-data/src/endpoints.ts index 7a6f4725fb79d..ce36509298d82 100644 --- a/clients/client-finspace-data/src/endpoints.ts +++ b/clients/client-finspace-data/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "finspace-api.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "finspace-api-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "finspace-api.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "finspace-api-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-finspace/src/endpoints.ts b/clients/client-finspace/src/endpoints.ts index 4983ea8a38a78..0745a29ea6d1f 100644 --- a/clients/client-finspace/src/endpoints.ts +++ b/clients/client-finspace/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "finspace.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "finspace-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "finspace.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "finspace-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-firehose/src/endpoints.ts b/clients/client-firehose/src/endpoints.ts 
index 9139e3ea0b93c..4a1d13a9d68eb 100644 --- a/clients/client-firehose/src/endpoints.ts +++ b/clients/client-firehose/src/endpoints.ts @@ -155,6 +155,10 @@ const partitionHash: PartitionHash = { hostname: "firehose.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "firehose-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -165,6 +169,10 @@ const partitionHash: PartitionHash = { hostname: "firehose.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "firehose-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-fis/src/endpoints.ts b/clients/client-fis/src/endpoints.ts index 73a4dae6cf54b..e3f84979efe5a 100644 --- a/clients/client-fis/src/endpoints.ts +++ b/clients/client-fis/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "fis.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "fis-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "fis.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "fis-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-fms/src/endpoints.ts b/clients/client-fms/src/endpoints.ts index 8359972a9ecb2..30a5811de4937 100644 --- a/clients/client-fms/src/endpoints.ts +++ b/clients/client-fms/src/endpoints.ts @@ -350,6 +350,10 @@ const partitionHash: PartitionHash = { hostname: "fms.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "fms-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -360,6 +364,10 @@ const partitionHash: PartitionHash = { hostname: "fms.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "fms-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-forecast/src/endpoints.ts b/clients/client-forecast/src/endpoints.ts index d0256f755ff76..d161f7c98b108 100644 --- a/clients/client-forecast/src/endpoints.ts +++ b/clients/client-forecast/src/endpoints.ts @@ -118,6 +118,10 @@ const partitionHash: PartitionHash = { hostname: "forecast.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "forecast-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -128,6 +132,10 @@ const partitionHash: PartitionHash = { hostname: "forecast.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "forecast-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-forecastquery/src/endpoints.ts b/clients/client-forecastquery/src/endpoints.ts index 06a354a96a4b1..3de6502fc3551 100644 --- a/clients/client-forecastquery/src/endpoints.ts +++ b/clients/client-forecastquery/src/endpoints.ts @@ -118,6 +118,10 @@ const partitionHash: PartitionHash = { hostname: "forecastquery.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "forecastquery-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -128,6 +132,10 @@ const partitionHash: PartitionHash = { hostname: "forecastquery.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "forecastquery-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-frauddetector/src/endpoints.ts b/clients/client-frauddetector/src/endpoints.ts index ac54ca6ccafc9..fcd60ba767bd4 100644 --- a/clients/client-frauddetector/src/endpoints.ts +++ b/clients/client-frauddetector/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "frauddetector.{region}.c2s.ic.gov", tags: [], }, + { 
+ hostname: "frauddetector-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "frauddetector.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "frauddetector-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-fsx/src/endpoints.ts b/clients/client-fsx/src/endpoints.ts index c3aaee928d6ad..8e1ba9216a83e 100644 --- a/clients/client-fsx/src/endpoints.ts +++ b/clients/client-fsx/src/endpoints.ts @@ -178,6 +178,10 @@ const partitionHash: PartitionHash = { hostname: "fsx.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "fsx-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -188,6 +192,10 @@ const partitionHash: PartitionHash = { hostname: "fsx.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "fsx-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-gamelift/src/endpoints.ts b/clients/client-gamelift/src/endpoints.ts index 398dd8ce2852e..7598e18749bc0 100644 --- a/clients/client-gamelift/src/endpoints.ts +++ b/clients/client-gamelift/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "gamelift.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "gamelift-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "gamelift.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "gamelift-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-glacier/src/endpoints.ts b/clients/client-glacier/src/endpoints.ts index 4660c9730e88e..4bbb17c9accc5 100644 --- a/clients/client-glacier/src/endpoints.ts +++ b/clients/client-glacier/src/endpoints.ts @@ -162,6 +162,10 @@ const partitionHash: PartitionHash = { hostname: "glacier.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "glacier-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -172,6 +176,10 @@ const partitionHash: PartitionHash = { hostname: "glacier.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "glacier-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-global-accelerator/src/endpoints.ts b/clients/client-global-accelerator/src/endpoints.ts index d0716d3dfa3d9..fb7882dc0790e 100644 --- a/clients/client-global-accelerator/src/endpoints.ts +++ b/clients/client-global-accelerator/src/endpoints.ts @@ -78,6 +78,10 @@ const partitionHash: PartitionHash = { hostname: "globalaccelerator.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "globalaccelerator-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -88,6 +92,10 @@ const partitionHash: PartitionHash = { hostname: "globalaccelerator.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "globalaccelerator-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-glue/src/endpoints.ts b/clients/client-glue/src/endpoints.ts index 2d9d5dc8986a4..a000713898a69 100644 --- a/clients/client-glue/src/endpoints.ts +++ b/clients/client-glue/src/endpoints.ts @@ -155,6 +155,10 @@ const partitionHash: PartitionHash = { hostname: "glue.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "glue-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -165,6 +169,10 @@ const partitionHash: PartitionHash = { hostname: "glue.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: 
"glue-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-grafana/src/endpoints.ts b/clients/client-grafana/src/endpoints.ts index 0372d7945f360..b76c9bdf3a0d9 100644 --- a/clients/client-grafana/src/endpoints.ts +++ b/clients/client-grafana/src/endpoints.ts @@ -169,6 +169,10 @@ const partitionHash: PartitionHash = { hostname: "grafana.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "grafana-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -179,6 +183,10 @@ const partitionHash: PartitionHash = { hostname: "grafana.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "grafana-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-greengrass/src/endpoints.ts b/clients/client-greengrass/src/endpoints.ts index 1731269f0bf2f..02011ff9d8505 100644 --- a/clients/client-greengrass/src/endpoints.ts +++ b/clients/client-greengrass/src/endpoints.ts @@ -119,6 +119,10 @@ const partitionHash: PartitionHash = { hostname: "greengrass.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "greengrass-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -129,6 +133,10 @@ const partitionHash: PartitionHash = { hostname: "greengrass.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "greengrass-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-greengrassv2/src/GreengrassV2.ts b/clients/client-greengrassv2/src/GreengrassV2.ts index 6ca139c110364..331be1dc3c4aa 100644 --- a/clients/client-greengrassv2/src/GreengrassV2.ts +++ b/clients/client-greengrassv2/src/GreengrassV2.ts @@ -296,6 +296,9 @@ export class GreengrassV2 extends GreengrassV2Client { *To create a component from a Lambda function, specify lambdaFunction
* when you call this operation.
+ * IoT Greengrass currently supports Lambda functions on only Linux core devices.
- *The POSIX system user and (optional) group to use to run this component. Specify the user
- * and group separated by a colon (:) in the following format:
+ *The POSIX system user and, optionally, group to use to run this component on Linux core
+ * devices. The user, and group if specified, must exist on each Linux core device. Specify the
+ * user and group separated by a colon (:) in the following format:
* user:group. The group is optional. If you don't specify a group, the IoT Greengrass Core
* software uses the primary user for the group.
If you omit this parameter, the IoT Greengrass Core software uses the default system user and group that @@ -726,11 +727,21 @@ export interface ComponentRunWith { posixUser?: string; /** - *
The system resource limits to apply to this component's process on the core device.
+ *The system resource limits to apply to this component's process on the core device. IoT Greengrass + * currently supports this feature on only Linux core devices.
*If you omit this parameter, the IoT Greengrass Core software uses the default system resource limits * that you configure on the Greengrass nucleus component. For more information, see Configure system resource limits for components.
*/ systemResourceLimits?: SystemResourceLimits; + + /** + *The Windows user to use to run this component on Windows core devices. The user must exist + * on each Windows core device, and its name and password must be in the LocalSystem account's + * Credentials Manager instance.
+ *If you omit this parameter, the IoT Greengrass Core software uses the default Windows user that you + * configure on the Greengrass nucleus component. For more information, see Configure the user and group that run components.
+ */ + windowsUser?: string; } export namespace ComponentRunWith { diff --git a/clients/client-greengrassv2/src/protocols/Aws_restJson1.ts b/clients/client-greengrassv2/src/protocols/Aws_restJson1.ts index e89e4d10409ad..d62bb4d3855eb 100644 --- a/clients/client-greengrassv2/src/protocols/Aws_restJson1.ts +++ b/clients/client-greengrassv2/src/protocols/Aws_restJson1.ts @@ -3370,6 +3370,7 @@ const serializeAws_restJson1ComponentRunWith = (input: ComponentRunWith, context input.systemResourceLimits !== null && { systemResourceLimits: serializeAws_restJson1SystemResourceLimits(input.systemResourceLimits, context), }), + ...(input.windowsUser !== undefined && input.windowsUser !== null && { windowsUser: input.windowsUser }), }; }; @@ -3952,6 +3953,7 @@ const deserializeAws_restJson1ComponentRunWith = (output: any, context: __SerdeC output.systemResourceLimits !== undefined && output.systemResourceLimits !== null ? deserializeAws_restJson1SystemResourceLimits(output.systemResourceLimits, context) : undefined, + windowsUser: __expectString(output.windowsUser), } as any; }; diff --git a/clients/client-groundstation/src/endpoints.ts b/clients/client-groundstation/src/endpoints.ts index d8af8da5bd7d0..25eff145a2677 100644 --- a/clients/client-groundstation/src/endpoints.ts +++ b/clients/client-groundstation/src/endpoints.ts @@ -118,6 +118,10 @@ const partitionHash: PartitionHash = { hostname: "groundstation.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "groundstation-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -128,6 +132,10 @@ const partitionHash: PartitionHash = { hostname: "groundstation.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "groundstation-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-guardduty/src/endpoints.ts b/clients/client-guardduty/src/endpoints.ts index 375faed4c1614..20cf516eaeaca 100644 --- a/clients/client-guardduty/src/endpoints.ts +++ b/clients/client-guardduty/src/endpoints.ts @@ -155,6 +155,10 @@ const partitionHash: PartitionHash = { hostname: "guardduty.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "guardduty-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -165,6 +169,10 @@ const partitionHash: PartitionHash = { hostname: "guardduty.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "guardduty-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-health/README.md b/clients/client-health/README.md index e92be54c78ee6..3d2460870895e 100644 --- a/clients/client-health/README.md +++ b/clients/client-health/README.md @@ -7,46 +7,46 @@ AWS SDK for JavaScript Health Client for Node.js, Browser and React Native. -The AWS Health API provides programmatic access to the AWS Health information that -appears in the AWS Personal Health Dashboard. You -can use the API operations to get information about AWS Health events that affect your -AWS services and resources.
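The new windowsUser field is accepted anywhere a ComponentRunWith is, for example in a deployment's per-component runWith block; the serializer and deserializer changes in Aws_restJson1.ts above carry it over the wire. A hedged sketch (the target ARN, component name, and user name are hypothetical):

```ts
import {
  GreengrassV2Client,
  CreateDeploymentCommand,
} from "@aws-sdk/client-greengrassv2";

const client = new GreengrassV2Client({ region: "us-east-1" });

async function deployToWindowsDevices(): Promise<void> {
  await client.send(
    new CreateDeploymentCommand({
      targetArn: "arn:aws:iot:us-east-1:123456789012:thinggroup/WindowsCoreDevices",
      components: {
        "com.example.HelloWorld": {
          componentVersion: "1.0.0",
          runWith: {
            // Must exist on each Windows core device, with its password stored
            // in the LocalSystem account's Credentials Manager instance.
            windowsUser: "ggc_user",
          },
        },
      },
    })
  );
}

deployToWindowsDevices().catch(console.error);
```

If windowsUser is omitted, the default configured on the Greengrass nucleus component applies, as the doc comment above states. The Health client diff resumes below.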
+The Health API provides programmatic access to the Health information that +appears in the Personal Health Dashboard. You +can use the API operations to get information about events that might affect your Amazon Web Services +services and resources.
You must have a Business or Enterprise Support plan from AWS Support to use the -AWS Health API. If you call the AWS Health API from an AWS account that +
You must have a Business or Enterprise Support plan from Amazon Web Services Support to use the
+Health API. If you call the Health API from an Amazon Web Services account that
doesn't have a Business or Enterprise Support plan, you receive a SubscriptionRequiredException error.
You can use the AWS Health endpoint health.us-east-1.amazonaws.com (HTTPS) to -call the AWS Health API operations. AWS Health supports a multi-Region +
You can use the Health endpoint health.us-east-1.amazonaws.com (HTTPS) to +call the Health API operations. Health supports a multi-Region application architecture and has two regional endpoints in an active-passive configuration. You can use the high availability endpoint example to determine -which AWS Region is active, so that you can get the latest information from the -API. For more information, see Accessing the AWS Health API in the -AWS Health User Guide.
+which Amazon Web Services Region is active, so that you can get the latest information from the +API. For more information, see Accessing the Health API in the +Health User Guide.For authentication of requests, AWS Health uses the Signature Version 4 Signing
+ For authentication of requests, Health uses the Signature Version 4 Signing
Process. If your AWS account is part of AWS Organizations, you can use the AWS Health organizational
-view feature. This feature provides a centralized view of AWS Health events across all
-accounts in your organization. You can aggregate AWS Health events in real time to
+ If your Amazon Web Services account is part of Organizations, you can use the Health organizational
+view feature. This feature provides a centralized view of Health events across all
+accounts in your organization. You can aggregate Health events in real time to
identify accounts in your organization that are affected by an operational event or get
notified of security vulnerabilities. Use the organizational view API operations to enable
this feature and return event information. For more information, see Aggregating
-AWS Health events in the AWS Health User Guide.
When you use the AWS Health API operations to return AWS Health events, see the +
When you use the Health API operations to return Health events, see the following recommendations:
Use the eventScopeCode parameter to specify whether to return AWS Health +
Use the eventScopeCode parameter to specify whether to return Health events that are public or account-specific.
The AWS Health API provides programmatic access to the AWS Health information that - * appears in the AWS Personal Health Dashboard. You - * can use the API operations to get information about AWS Health events that affect your - * AWS services and resources.
+ *The Health API provides programmatic access to the Health information that + * appears in the Personal Health Dashboard. You + * can use the API operations to get information about events that might affect your Amazon Web Services + * services and resources.
*You must have a Business or Enterprise Support plan from AWS Support to use the - * AWS Health API. If you call the AWS Health API from an AWS account that + *
You must have a Business or Enterprise Support plan from Amazon Web Services Support to use the
+ * Health API. If you call the Health API from an Amazon Web Services account that
* doesn't have a Business or Enterprise Support plan, you receive a * SubscriptionRequiredException error.
You can use the AWS Health endpoint health.us-east-1.amazonaws.com (HTTPS) to - * call the AWS Health API operations. AWS Health supports a multi-Region + *
You can use the Health endpoint health.us-east-1.amazonaws.com (HTTPS) to + * call the Health API operations. Health supports a multi-Region * application architecture and has two regional endpoints in an active-passive * configuration. You can use the high availability endpoint example to determine - * which AWS Region is active, so that you can get the latest information from the - * API. For more information, see Accessing the AWS Health API in the - * AWS Health User Guide.
+ * which Amazon Web Services Region is active, so that you can get the latest information from the + * API. For more information, see Accessing the Health API in the + * Health User Guide. *For authentication of requests, AWS Health uses the Signature Version 4 Signing
+ * For authentication of requests, Health uses the Signature Version 4 Signing
* Process. If your AWS account is part of AWS Organizations, you can use the AWS Health organizational
- * view feature. This feature provides a centralized view of AWS Health events across all
- * accounts in your organization. You can aggregate AWS Health events in real time to
+ * If your Amazon Web Services account is part of Organizations, you can use the Health organizational
+ * view feature. This feature provides a centralized view of Health events across all
+ * accounts in your organization. You can aggregate Health events in real time to
* identify accounts in your organization that are affected by an operational event or get
* notified of security vulnerabilities. Use the organizational view API operations to enable
* this feature and return event information. For more information, see Aggregating
- * AWS Health events in the AWS Health User Guide.
When you use the AWS Health API operations to return AWS Health events, see the + *
When you use the Health API operations to return Health events, see the * following recommendations:
*Use the eventScopeCode parameter to specify whether to return AWS Health + *
Use the eventScopeCode parameter to specify whether to return Health * events that are public or account-specific.
*Returns a list of accounts in the organization from AWS Organizations that are affected by the - * provided event. For more information about the different types of AWS Health events, see + *
Returns a list of accounts in the organization from Organizations that are affected by the + * provided event. For more information about the different types of Health events, see * Event.
- *Before you can call this operation, you must first enable AWS Health to work with - * AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's + *
Before you can call this operation, you must first enable Health to work with + * Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's * management account.
*This API operation uses pagination. Specify the nextToken
parameter in the next request to return more results.
Returns a list of entities that have been affected by the specified events, based on the * specified filter criteria. Entities can refer to individual customer resources, groups of - * customer resources, or any other construct, depending on the AWS service. Events that + * customer resources, or any other construct, depending on the Amazon Web Services service. Events that * have impact beyond that of the affected entities, or where the extent of impact is unknown, * include at least one entity indicating this.
- *At least one event ARN is required. Results are sorted by the
- * lastUpdatedTime of the entity, starting with the most recent.
At least one event ARN is required.
* *This API operation uses pagination. Specify the nextToken
parameter in the next request to return more results.
This operation supports resource-level permissions. You can use this operation to allow or deny access to specific AWS Health events. For more - * information, see Resource- and action-based conditions in the AWS Health User Guide.
+ *This operation supports resource-level permissions. You can use this operation to allow or deny access to specific Health events. For more + * information, see Resource- and action-based conditions in the Health User Guide.
*Returns a list of entities that have been affected by one or more events for one or more - * accounts in your organization in AWS Organizations, based on the filter criteria. Entities can refer + * accounts in your organization in Organizations, based on the filter criteria. Entities can refer * to individual customer resources, groups of customer resources, or any other construct, - * depending on the AWS service.
- *At least one event Amazon Resource Name (ARN) and account ID are required. Results are
- * sorted by the lastUpdatedTime
of the entity, starting with the most
- * recent.
Before you can call this operation, you must first enable AWS Health to work with - * AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization + * depending on the Amazon Web Services service.
+ *At least one event Amazon Resource Name (ARN) and account ID are required.
+ *Before you can call this operation, you must first enable Health to work with + * Organizations. To do this, call the EnableHealthServiceAccessForOrganization * operation from your organization's management account.
*This API operation uses pagination. Specify the nextToken
parameter in the next request to return more results.
This operation doesn't support resource-level permissions. You can't use this operation to allow or deny access to specific AWS Health events. For more - * information, see Resource- and action-based conditions in the AWS Health User Guide.
+ *This operation doesn't support resource-level permissions. You can't use this operation to allow or deny access to specific Health events. For more + * information, see Resource- and action-based conditions in the Health User Guide.
*Returns the number of entities that are affected by each of the specified events. If no - * events are specified, the counts of all affected entities are returned.
+ *Returns the number of entities that are affected by each of the specified events.
*/ public describeEntityAggregates( args: DescribeEntityAggregatesCommandInput, @@ -339,14 +335,14 @@ export class Health extends HealthClient { /** *Returns detailed information about one or more specified events. Information includes - * standard event data (AWS Region, service, and so on, as returned by DescribeEvents), a detailed event description, and possible additional metadata + * standard event data (Amazon Web Services Region, service, and so on, as returned by DescribeEvents), a detailed event description, and possible additional metadata * that depends upon the nature of the event. Affected entities are not included. To retrieve * the entities, use the DescribeAffectedEntities operation.
*If a specified event can't be retrieved, an error message is returned for that * event.
*This operation supports resource-level permissions. You can use this operation to allow or deny access to specific AWS Health events. For more - * information, see Resource- and action-based conditions in the AWS Health User Guide.
+ *This operation supports resource-level permissions. You can use this operation to allow or deny access to specific Health events. For more + * information, see Resource- and action-based conditions in the Health User Guide.
*Returns detailed information about one or more specified events for one or more AWS - * accounts in your organization. This information includes standard event data (such as the - * AWS Region and service), an event description, and (depending on the event) possible + *
Returns detailed information about one or more specified events for one or more + * Amazon Web Services accounts in your organization. This information includes standard event data (such as the + * Amazon Web Services Region and service), an event description, and (depending on the event) possible * metadata. This operation doesn't return affected entities, such as the resources related to * the event. To return affected entities, use the DescribeAffectedEntitiesForOrganization operation.
*Before you can call this operation, you must first enable AWS Health to work with - * AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's + *
Before you can call this operation, you must first enable Health to work with + * Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's * management account.
*When you call the DescribeEventDetailsForOrganization
operation, specify
* the organizationEventDetailFilters
object in the request. Depending on the
- * AWS Health event type, note the following differences:
To return event details for a public event, you must specify a null value for the
* awsAccountId
parameter. If you specify an account ID for a public
- * event, AWS Health returns an error message because public events aren't specific to
+ * event, Health returns an error message because public events aren't specific to
* an account.
To return event details for an event that is specific to an account in your
* organization, you must specify the awsAccountId
parameter in the
- * request. If you don't specify an account ID, AWS Health returns an error message
+ * request. If you don't specify an account ID, Health returns an error message
* because the event is specific to an account in your organization.
For more information, see Event.
* *This operation doesn't support resource-level permissions. You can't use this operation to allow or deny access to specific AWS Health events. For more - * information, see Resource- and action-based conditions in the AWS Health User Guide.
+ *This operation doesn't support resource-level permissions. You can't use this operation to allow or deny access to specific Health events. For more + * information, see Resource- and action-based conditions in the Health User Guide.
*When you call the DescribeEvents
operation and specify an entity
- * for the entityValues
parameter, AWS Health might return public
+ * for the entityValues
parameter, Health might return public
* events that aren't specific to that resource. For example, if you call
* DescribeEvents
and specify an ID for an Amazon Elastic Compute Cloud (Amazon EC2)
- * instance, AWS Health might return events that aren't specific to that resource or
+ * instance, Health might return events that aren't specific to that resource or
* service. To get events that are specific to a service, use the
* services
parameter in the filter
object. For more
* information, see Event.
Returns information about events across your organization in AWS Organizations. You can use + *
Returns information about events across your organization in Organizations. You can use
* thefilters
parameter to specify the events that you want to return. Events
* are returned in a summary form and don't include the affected accounts, detailed
* description, any additional metadata that depends on the event type, or any affected
@@ -522,9 +518,9 @@ export class Health extends HealthClient {
*
If you don't specify a filter
, the
* DescribeEventsForOrganizations
returns all events across your organization.
* Results are sorted by lastModifiedTime
, starting with the most recent event.
For more information about the different types of AWS Health events, see Event.
- *Before you can call this operation, you must first enable AWS Health to work with - * AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's + *
For more information about the different types of Health events, see Event.
+ *Before you can call this operation, you must first enable Health to work with + * Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's * management account.
*This API operation uses pagination. Specify the nextToken
parameter in the next request to return more results.
Returns the event types that meet the specified filter criteria. You can use this API - * operation to find information about the AWS Health event, such as the category, AWS + * operation to find information about the Health event, such as the category, Amazon Web Services * service, and event code. The metadata for each event appears in the EventType object.
*If you don't specify a filter criteria, the API operation returns all event types, in no * particular order.
@@ -599,7 +595,7 @@ export class Health extends HealthClient { } /** - *This operation provides status information on enabling or disabling AWS Health to work + *
This operation provides status information on enabling or disabling Health to work * with your organization. To call this operation, you must sign in as an IAM user, assume * an IAM role, or sign in as the root user (not recommended) in the organization's * management account.
@@ -636,20 +632,19 @@ export class Health extends HealthClient { } /** - *Disables AWS Health from working with AWS Organizations. To call this operation, you must sign - * in as an AWS Identity and Access Management (IAM) user, assume an IAM role, or sign in as the root user (not + *
Disables Health from working with Organizations. To call this operation, you must sign + * in as an Identity and Access Management (IAM) user, assume an IAM role, or sign in as the root user (not * recommended) in the organization's management account. For more information, see Aggregating - * AWS Health events in the - * AWS Health User Guide.
- *This operation doesn't remove the service-linked role from the management account in your organization. You must use the IAM console, API, or AWS Command Line Interface (AWS CLI) to + * Health events in the + * Health User Guide.
+ *This operation doesn't remove the service-linked role from the management account in your organization. You must use the IAM console, API, or Command Line Interface (CLI) to * remove the service-linked role. For more information, see Deleting a Service-Linked Role in the * IAM User Guide.
*You can also disable the organizational feature by using the Organizations DisableAWSServiceAccess API operation. After you call this operation, - * AWS Health stops aggregating events for all other AWS accounts in your organization. - * If you call the AWS Health API operations for organizational view, AWS Health returns - * an error. AWS Health continues to aggregate health events for your AWS - * account.
+ * Health stops aggregating events for all other Amazon Web Services accounts in your organization. + * If you call the Health API operations for organizational view, Health returns + * an error. Health continues to aggregate health events for your Amazon Web Services account. *Enables AWS Health to work with AWS Organizations. You can use the organizational view feature - * to aggregate events from all AWS accounts in your organization in a centralized location.
+ *Enables Health to work with Organizations. You can use the organizational view feature + * to aggregate events from all Amazon Web Services accounts in your organization in a centralized location.
*This operation also creates a service-linked role for the management account in the * organization.
*To call this operation, you must meet the following requirements:
*You must have a Business or Enterprise Support plan from AWS Support to use the AWS Health - * API. If you call the AWS Health API from an AWS account that doesn't have a + *
You must have a Business or Enterprise Support plan from Amazon Web Services Support to use the Health
+ * API. If you call the Health API from an Amazon Web Services account that doesn't have a
* Business or Enterprise Support plan, you receive a
* SubscriptionRequiredException
error.
You must have permission to call this operation from the organization's - * management account. For example IAM policies, see AWS Health + * management account. For example IAM policies, see Health * identity-based policy examples.
*If you don't have the required support plan, you can instead use the AWS Health console + *
If you don't have the required support plan, you can instead use the Health console * to enable the organizational view feature. For more information, see Aggregating - * AWS Health events in the AWS Health User Guide.
+ * Health events in the Health User Guide. */ public enableHealthServiceAccessForOrganization( args: EnableHealthServiceAccessForOrganizationCommandInput, diff --git a/clients/client-health/src/HealthClient.ts b/clients/client-health/src/HealthClient.ts index 8c14909e7cdbf..f90c6fb94f43a 100644 --- a/clients/client-health/src/HealthClient.ts +++ b/clients/client-health/src/HealthClient.ts @@ -276,46 +276,46 @@ type HealthClientResolvedConfigType = __SmithyResolvedConfiguration<__HttpHandle export interface HealthClientResolvedConfig extends HealthClientResolvedConfigType {} /** - *The AWS Health API provides programmatic access to the AWS Health information that - * appears in the AWS Personal Health Dashboard. You - * can use the API operations to get information about AWS Health events that affect your - * AWS services and resources.
+ *The Health API provides programmatic access to the Health information that + * appears in the Personal Health Dashboard. You + * can use the API operations to get information about events that might affect your Amazon Web Services + * services and resources.
*You must have a Business or Enterprise Support plan from AWS Support to use the - * AWS Health API. If you call the AWS Health API from an AWS account that + *
You must have a Business or Enterprise Support plan from Amazon Web Services Support to use the
+ * Health API. If you call the Health API from an Amazon Web Services account that
* doesn't have a Business or Enterprise Support plan, you receive a
* SubscriptionRequiredException
error.
You can use the AWS Health endpoint health.us-east-1.amazonaws.com (HTTPS) to - * call the AWS Health API operations. AWS Health supports a multi-Region + *
You can use the Health endpoint health.us-east-1.amazonaws.com (HTTPS) to + * call the Health API operations. Health supports a multi-Region * application architecture and has two regional endpoints in an active-passive * configuration. You can use the high availability endpoint example to determine - * which AWS Region is active, so that you can get the latest information from the - * API. For more information, see Accessing the AWS Health API in the - * AWS Health User Guide.
+ * which Amazon Web Services Region is active, so that you can get the latest information from the + * API. For more information, see Accessing the Health API in the + * Health User Guide. *For authentication of requests, AWS Health uses the Signature Version 4 Signing
+ * For authentication of requests, Health uses the Signature Version 4 Signing
* Process. If your AWS account is part of AWS Organizations, you can use the AWS Health organizational
- * view feature. This feature provides a centralized view of AWS Health events across all
- * accounts in your organization. You can aggregate AWS Health events in real time to
+ * If your Amazon Web Services account is part of Organizations, you can use the Health organizational
+ * view feature. This feature provides a centralized view of Health events across all
+ * accounts in your organization. You can aggregate Health events in real time to
* identify accounts in your organization that are affected by an operational event or get
* notified of security vulnerabilities. Use the organizational view API operations to enable
* this feature and return event information. For more information, see Aggregating
- * AWS Health events in the AWS Health User Guide.
When you use the AWS Health API operations to return AWS Health events, see the + *
When you use the Health API operations to return Health events, see the * following recommendations:
*Use the eventScopeCode parameter to specify whether to return AWS Health + *
Use the eventScopeCode parameter to specify whether to return Health * events that are public or account-specific.
*Returns a list of accounts in the organization from AWS Organizations that are affected by the - * provided event. For more information about the different types of AWS Health events, see + *
Returns a list of accounts in the organization from Organizations that are affected by the + * provided event. For more information about the different types of Health events, see * Event.
- *Before you can call this operation, you must first enable AWS Health to work with - * AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's + *
Before you can call this operation, you must first enable Health to work with + * Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's * management account.
*This API operation uses pagination. Specify the nextToken
parameter in the next request to return more results.
Returns a list of entities that have been affected by the specified events, based on the * specified filter criteria. Entities can refer to individual customer resources, groups of - * customer resources, or any other construct, depending on the AWS service. Events that + * customer resources, or any other construct, depending on the Amazon Web Services service. Events that * have impact beyond that of the affected entities, or where the extent of impact is unknown, * include at least one entity indicating this.
- *At least one event ARN is required. Results are sorted by the
- * lastUpdatedTime
of the entity, starting with the most recent.
At least one event ARN is required.
* *This API operation uses pagination. Specify the nextToken
parameter in the next request to return more results.
This operation supports resource-level permissions. You can use this operation to allow or deny access to specific AWS Health events. For more - * information, see Resource- and action-based conditions in the AWS Health User Guide.
+ *This operation supports resource-level permissions. You can use this operation to allow or deny access to specific Health events. For more + * information, see Resource- and action-based conditions in the Health User Guide.
*Returns a list of entities that have been affected by one or more events for one or more - * accounts in your organization in AWS Organizations, based on the filter criteria. Entities can refer + * accounts in your organization in Organizations, based on the filter criteria. Entities can refer * to individual customer resources, groups of customer resources, or any other construct, - * depending on the AWS service.
- *At least one event Amazon Resource Name (ARN) and account ID are required. Results are
- * sorted by the lastUpdatedTime
of the entity, starting with the most
- * recent.
Before you can call this operation, you must first enable AWS Health to work with - * AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization + * depending on the Amazon Web Services service.
+ *At least one event Amazon Resource Name (ARN) and account ID are required.
+ *Before you can call this operation, you must first enable Health to work with + * Organizations. To do this, call the EnableHealthServiceAccessForOrganization * operation from your organization's management account.
*This API operation uses pagination. Specify the nextToken
parameter in the next request to return more results.
This operation doesn't support resource-level permissions. You can't use this operation to allow or deny access to specific AWS Health events. For more - * information, see Resource- and action-based conditions in the AWS Health User Guide.
+ *This operation doesn't support resource-level permissions. You can't use this operation to allow or deny access to specific Health events. For more + * information, see Resource- and action-based conditions in the Health User Guide.
*Returns the number of entities that are affected by each of the specified events. If no - * events are specified, the counts of all affected entities are returned.
+ *Returns the number of entities that are affected by each of the specified events.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-health/src/commands/DescribeEventDetailsCommand.ts b/clients/client-health/src/commands/DescribeEventDetailsCommand.ts index 056bd1a1cb3b1..29d786bb02fae 100644 --- a/clients/client-health/src/commands/DescribeEventDetailsCommand.ts +++ b/clients/client-health/src/commands/DescribeEventDetailsCommand.ts @@ -23,14 +23,14 @@ export interface DescribeEventDetailsCommandOutput extends DescribeEventDetailsR /** *Returns detailed information about one or more specified events. Information includes - * standard event data (AWS Region, service, and so on, as returned by DescribeEvents), a detailed event description, and possible additional metadata + * standard event data (Amazon Web Services Region, service, and so on, as returned by DescribeEvents), a detailed event description, and possible additional metadata * that depends upon the nature of the event. Affected entities are not included. To retrieve * the entities, use the DescribeAffectedEntities operation.
*If a specified event can't be retrieved, an error message is returned for that * event.
*This operation supports resource-level permissions. You can use this operation to allow or deny access to specific AWS Health events. For more - * information, see Resource- and action-based conditions in the AWS Health User Guide.
+ *This operation supports resource-level permissions. You can use this operation to allow or deny access to specific Health events. For more + * information, see Resource- and action-based conditions in the Health User Guide.
*Returns detailed information about one or more specified events for one or more AWS - * accounts in your organization. This information includes standard event data (such as the - * AWS Region and service), an event description, and (depending on the event) possible + *
Returns detailed information about one or more specified events for one or more + * Amazon Web Services accounts in your organization. This information includes standard event data (such as the + * Amazon Web Services Region and service), an event description, and (depending on the event) possible * metadata. This operation doesn't return affected entities, such as the resources related to * the event. To return affected entities, use the DescribeAffectedEntitiesForOrganization operation.
*Before you can call this operation, you must first enable AWS Health to work with - * AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's + *
Before you can call this operation, you must first enable Health to work with + * Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's * management account.
*When you call the DescribeEventDetailsForOrganization
operation, specify
* the organizationEventDetailFilters
object in the request. Depending on the
- * AWS Health event type, note the following differences:
To return event details for a public event, you must specify a null value for the
* awsAccountId
parameter. If you specify an account ID for a public
- * event, AWS Health returns an error message because public events aren't specific to
+ * event, Health returns an error message because public events aren't specific to
* an account.
To return event details for an event that is specific to an account in your
* organization, you must specify the awsAccountId
parameter in the
- * request. If you don't specify an account ID, AWS Health returns an error message
+ * request. If you don't specify an account ID, Health returns an error message
* because the event is specific to an account in your organization.
For more information, see Event.
* *This operation doesn't support resource-level permissions. You can't use this operation to allow or deny access to specific AWS Health events. For more - * information, see Resource- and action-based conditions in the AWS Health User Guide.
+ *This operation doesn't support resource-level permissions. You can't use this operation to allow or deny access to specific Health events. For more + * information, see Resource- and action-based conditions in the Health User Guide.
*Returns the event types that meet the specified filter criteria. You can use this API - * operation to find information about the AWS Health event, such as the category, AWS + * operation to find information about the Health event, such as the category, Amazon Web Services * service, and event code. The metadata for each event appears in the EventType object.
*If you don't specify a filter criteria, the API operation returns all event types, in no * particular order.
diff --git a/clients/client-health/src/commands/DescribeEventsCommand.ts b/clients/client-health/src/commands/DescribeEventsCommand.ts index 2bdf2cd8bd2b2..fd205be50fea3 100644 --- a/clients/client-health/src/commands/DescribeEventsCommand.ts +++ b/clients/client-health/src/commands/DescribeEventsCommand.ts @@ -32,10 +32,10 @@ export interface DescribeEventsCommandOutput extends DescribeEventsResponse, __M *When you call the DescribeEvents
operation and specify an entity
- * for the entityValues
parameter, AWS Health might return public
+ * for the entityValues
parameter, Health might return public
* events that aren't specific to that resource. For example, if you call
* DescribeEvents
and specify an ID for an Amazon Elastic Compute Cloud (Amazon EC2)
- * instance, AWS Health might return events that aren't specific to that resource or
+ * instance, Health might return events that aren't specific to that resource or
* service. To get events that are specific to a service, use the
* services
parameter in the filter
object. For more
* information, see Event.
Returns information about events across your organization in AWS Organizations. You can use + *
Returns information about events across your organization in Organizations. You can use
* thefilters
parameter to specify the events that you want to return. Events
* are returned in a summary form and don't include the affected accounts, detailed
* description, any additional metadata that depends on the event type, or any affected
@@ -49,9 +49,9 @@ export interface DescribeEventsForOrganizationCommandOutput
*
If you don't specify a filter
, the
* DescribeEventsForOrganizations
returns all events across your organization.
* Results are sorted by lastModifiedTime
, starting with the most recent event.
For more information about the different types of AWS Health events, see Event.
- *Before you can call this operation, you must first enable AWS Health to work with - * AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's + *
For more information about the different types of Health events, see Event.
+ *Before you can call this operation, you must first enable Health to work with + * Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's * management account.
*This API operation uses pagination. Specify the nextToken
parameter in the next request to return more results.
This operation provides status information on enabling or disabling AWS Health to work + *
This operation provides status information on enabling or disabling Health to work * with your organization. To call this operation, you must sign in as an IAM user, assume * an IAM role, or sign in as the root user (not recommended) in the organization's * management account.
diff --git a/clients/client-health/src/commands/DisableHealthServiceAccessForOrganizationCommand.ts b/clients/client-health/src/commands/DisableHealthServiceAccessForOrganizationCommand.ts index d7a934cac727b..f8e03c44819b7 100644 --- a/clients/client-health/src/commands/DisableHealthServiceAccessForOrganizationCommand.ts +++ b/clients/client-health/src/commands/DisableHealthServiceAccessForOrganizationCommand.ts @@ -21,20 +21,19 @@ export interface DisableHealthServiceAccessForOrganizationCommandInput {} export interface DisableHealthServiceAccessForOrganizationCommandOutput extends __MetadataBearer {} /** - *Disables AWS Health from working with AWS Organizations. To call this operation, you must sign - * in as an AWS Identity and Access Management (IAM) user, assume an IAM role, or sign in as the root user (not + *
Disables Health from working with Organizations. To call this operation, you must sign + * in as an Identity and Access Management (IAM) user, assume an IAM role, or sign in as the root user (not * recommended) in the organization's management account. For more information, see Aggregating - * AWS Health events in the - * AWS Health User Guide.
- *This operation doesn't remove the service-linked role from the management account in your organization. You must use the IAM console, API, or AWS Command Line Interface (AWS CLI) to + * Health events in the + * Health User Guide.
+ *This operation doesn't remove the service-linked role from the management account in your organization. You must use the IAM console, API, or Command Line Interface (CLI) to * remove the service-linked role. For more information, see Deleting a Service-Linked Role in the * IAM User Guide.
*You can also disable the organizational feature by using the Organizations DisableAWSServiceAccess API operation. After you call this operation, - * AWS Health stops aggregating events for all other AWS accounts in your organization. - * If you call the AWS Health API operations for organizational view, AWS Health returns - * an error. AWS Health continues to aggregate health events for your AWS - * account.
+ * Health stops aggregating events for all other Amazon Web Services accounts in your organization. + * If you call the Health API operations for organizational view, Health returns + * an error. Health continues to aggregate health events for your Amazon Web Services account. *Enables AWS Health to work with AWS Organizations. You can use the organizational view feature - * to aggregate events from all AWS accounts in your organization in a centralized location.
+ *Enables Health to work with Organizations. You can use the organizational view feature + * to aggregate events from all Amazon Web Services accounts in your organization in a centralized location.
*This operation also creates a service-linked role for the management account in the * organization.
*To call this operation, you must meet the following requirements:
*You must have a Business or Enterprise Support plan from AWS Support to use the AWS Health - * API. If you call the AWS Health API from an AWS account that doesn't have a + *
You must have a Business or Enterprise Support plan from Amazon Web Services Support to use the Health
+ * API. If you call the Health API from an Amazon Web Services account that doesn't have a
* Business or Enterprise Support plan, you receive a
* SubscriptionRequiredException
error.
You must have permission to call this operation from the organization's - * management account. For example IAM policies, see AWS Health + * management account. For example IAM policies, see Health * identity-based policy examples.
*If you don't have the required support plan, you can instead use the AWS Health console + *
If you don't have the required support plan, you can instead use the Health console * to enable the organizational view feature. For more information, see Aggregating - * AWS Health events in the AWS Health User Guide.
+ * Health events in the Health User Guide. * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-health/src/endpoints.ts b/clients/client-health/src/endpoints.ts index 4e3575a7ea60b..95f090f5238f2 100644 --- a/clients/client-health/src/endpoints.ts +++ b/clients/client-health/src/endpoints.ts @@ -79,6 +79,10 @@ const partitionHash: PartitionHash = { hostname: "health.{region}.c2s.ic.gov", tags: [], }, + { + hostname: "health-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, ], }, "aws-iso-b": { @@ -89,6 +93,10 @@ const partitionHash: PartitionHash = { hostname: "health.{region}.sc2s.sgov.gov", tags: [], }, + { + hostname: "health-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, ], }, "aws-us-gov": { diff --git a/clients/client-health/src/models/models_0.ts b/clients/client-health/src/models/models_0.ts index 3c17345f4b379..21f0ecef20a0e 100644 --- a/clients/client-health/src/models/models_0.ts +++ b/clients/client-health/src/models/models_0.ts @@ -40,7 +40,7 @@ export interface AffectedEntity { entityUrl?: string; /** - *The 12-digit AWS account number that contains the affected entity.
+ *The 12-digit Amazon Web Services account number that contains the affected entity.
*/ awsAccountId?: string; @@ -122,7 +122,7 @@ export interface DescribeAffectedAccountsForOrganizationResponse { affectedAccounts?: string[]; /** - *This parameter specifies if the AWS Health event is a public AWS service event or an account-specific event.
+ *This parameter specifies if the Health event is a public Amazon Web Services service event or an account-specific event.
*If the eventScopeCode
value is PUBLIC
, then the
@@ -130,9 +130,9 @@ export interface DescribeAffectedAccountsForOrganizationResponse {
*
If the eventScopeCode
value is ACCOUNT_SPECIFIC
, then
- * the affectedAccounts
value lists the affected AWS accounts in your
+ * the affectedAccounts
value lists the affected Amazon Web Services accounts in your
* organization. For example, if an event affects a service such as Amazon Elastic Compute Cloud and you
- * have AWS accounts that use that service, those account IDs appear in the
+ * have Amazon Web Services accounts that use that service, those account IDs appear in the
* response.
The values to use to filter results from the EntityFilter + *
The values to use to filter results from the DescribeAffectedEntities * operation.
*/ export interface EntityFilter { @@ -353,7 +353,7 @@ export interface EventAccountFilter { eventArn: string | undefined; /** - *The 12-digit AWS account numbers that contains the affected entities.
+ *The 12-digit Amazon Web Services account numbers that contains the affected entities.
*/ awsAccountId?: string; } @@ -408,7 +408,7 @@ export namespace DescribeAffectedEntitiesForOrganizationRequest { */ export interface OrganizationAffectedEntitiesErrorItem { /** - *The 12-digit AWS account numbers that contains the affected entities.
+ *The 12-digit Amazon Web Services account numbers that contains the affected entities.
*/ awsAccountId?: string; @@ -577,17 +577,17 @@ export interface EventFilter { eventTypeCodes?: string[]; /** - *The AWS services associated with the event. For example, EC2
, RDS.
+ *The Amazon Web Services services associated with the event. For example, EC2, RDS.
A list of AWS Regions.
+ *A list of Amazon Web Services Regions.
*/ regions?: string[]; /** - *A list of AWS Availability Zones.
+ *A list of Amazon Web Services Availability Zones.
*/ availabilityZones?: string[]; @@ -618,8 +618,9 @@ export interface EventFilter { entityValues?: string[]; /** - *A list of event type category codes (issue
, scheduledChange,
- * or accountNotification).
+ *A list of event type category codes. Possible values are
+ * issue, accountNotification, or scheduledChange. Currently,
+ * the investigation value isn't supported.
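(Editorial aside, not part of the diff: a minimal sketch of passing one of these category codes through the `EventFilter` documented above; the region value is a placeholder.)

```js
// Hypothetical sketch: filter events to the "issue" category using the
// eventTypeCategories field of EventFilter.
import { HealthClient, DescribeEventsCommand } from "@aws-sdk/client-health";

const client = new HealthClient({ region: "us-east-1" });
const { events } = await client.send(
  new DescribeEventsCommand({
    filter: { eventTypeCategories: ["issue"], regions: ["us-east-1"] },
  })
);
console.log(events);
```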
- *Summary information about an AWS Health event.
- *AWS Health events can be public or account-specific:
+ *Summary information about a Health event.
+ *Health events can be public or account-specific:
 *
- * Public events might be service events that are not specific
- * to an AWS account. For example, if there is an issue with an AWS Region,
- * AWS Health provides information about the event, even if you don't use services or
+ * to an Amazon Web Services account. For example, if there is an issue with an Amazon Web Services Region,
+ * Health provides information about the event, even if you don't use services or
 * resources in that Region.
 *
- * Account-specific events are specific to either your AWS
- * account or an account in your organization. For example, if there's an issue with
- * Amazon Elastic Compute Cloud in a Region that you use, AWS Health provides information about the event
+ * Account-specific events are specific to either your Amazon Web Services account or an account in your organization. For example, if there's an issue with
+ * Amazon Elastic Compute Cloud in a Region that you use, Health provides information about the event
 * and the affected resources in the account.
- *The AWS service that is affected by the event. For example, EC2, RDS.
+ *The Amazon Web Services service that is affected by the event. For example, EC2, RDS.
- *The category of the event. Possible values are issue,
- * scheduledChange, and accountNotification.
+ *A list of event type category codes. Possible values are
+ * issue, accountNotification, or scheduledChange. Currently,
+ * the investigation value isn't supported.
The AWS Region name of the event.
+ *The Amazon Web Services Region name of the event.
*/ region?: string; /** - *The AWS Availability Zone of the event. For example, us-east-1a.
+ *The Amazon Web Services Availability Zone of the event. For example, us-east-1a.
*/ availabilityZone?: string; @@ -870,7 +871,7 @@ export interface Event { statusCode?: EventStatusCode | string; /** - *This parameter specifies if the AWS Health event is a public AWS service event or an account-specific event.
+ *This parameter specifies if the Health event is a public Amazon Web Services service event or an account-specific event.
*If the eventScopeCode value is PUBLIC, then the
@@ -878,9 +879,9 @@ export interface Event {
* If the eventScopeCode value is ACCOUNT_SPECIFIC, then
- * the affectedAccounts value lists the affected AWS accounts in your
+ * the affectedAccounts value lists the affected Amazon Web Services accounts in your
* organization. For example, if an event affects a service such as Amazon Elastic Compute Cloud and you
- * have AWS accounts that use that service, those account IDs appear in the
+ * have Amazon Web Services accounts that use that service, those account IDs appear in the
* response.
A message that describes the error.
- *If you call the DescribeEventDetailsForOrganization
- * operation and receive one of the following errors, follow the recommendations in the message:
+ *If you call the DescribeEventDetailsForOrganization operation and receive one of the following errors, follow the recommendations in the message:
We couldn't find a public event that matches your request. To find an event that is account specific, you must enter an AWS account ID in the request.
+ *We couldn't find a public event that matches your request. To find an event that is account specific, you must enter an Amazon Web Services account ID in the request.
*We couldn't find an account specific event for the specified AWS account. To find an event that is public, you must enter a null value for the AWS account ID in the request.
+ *We couldn't find an account specific event for the specified Amazon Web Services account. To find an event that is public, you must enter a null value for the Amazon Web Services account ID in the request.
*Your AWS account doesn't include the AWS Support plan required to use the AWS Health API. You must have either a Business or Enterprise Support plan.
+ *Your Amazon Web Services account doesn't include the Amazon Web Services Support plan required to use the Health API. You must have either a Business or Enterprise Support plan.
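(Editorial aside, not part of the diff: a minimal sketch of surfacing these error messages from the `failedSet` of a `DescribeEventDetailsForOrganization` response; the event ARN and account ID are placeholders.)

```js
// Hypothetical sketch: read the recommendation text from failedSet entries.
import {
  HealthClient,
  DescribeEventDetailsForOrganizationCommand,
} from "@aws-sdk/client-health";

const client = new HealthClient({ region: "us-east-1" });
const { failedSet } = await client.send(
  new DescribeEventDetailsForOrganizationCommand({
    organizationEventDetailFilters: [
      {
        eventArn: "arn:aws:health:us-east-1::event/EXAMPLE",
        awsAccountId: "123456789012",
      },
    ],
  })
);
for (const item of failedSet ?? []) {
  console.log(item.awsAccountId, item.errorMessage);
}
```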
*The 12-digit AWS account numbers that contains the affected entities.
+ *The 12-digit Amazon Web Services account number that contains the affected entities.
*/ awsAccountId?: string; /** - *Summary information about an AWS Health event.
- *AWS Health events can be public or account-specific:
+ *Summary information about a Health event.
+ *Health events can be public or account-specific:
 *
- * Public events might be service events that are not specific
- * to an AWS account. For example, if there is an issue with an AWS Region,
- * AWS Health provides information about the event, even if you don't use services or
+ * to an Amazon Web Services account. For example, if there is an issue with an Amazon Web Services Region,
+ * Health provides information about the event, even if you don't use services or
 * resources in that Region.
 *
- * Account-specific events are specific to either your AWS
- * account or an account in your organization. For example, if there's an issue with
- * Amazon Elastic Compute Cloud in a Region that you use, AWS Health provides information about the event
+ * Account-specific events are specific to either your Amazon Web Services account or an account in your organization. For example, if there's an issue with
+ * Amazon Elastic Compute Cloud in a Region that you use, Health provides information about the event
 * and the affected resources in the account.
*A list of 12-digit AWS account numbers that contains the affected entities.
+ *A list of 12-digit Amazon Web Services account numbers that contain the affected entities.
*/ awsAccountIds?: string[]; /** - *The AWS services associated with the event. For example, EC2
, RDS.
+ *The Amazon Web Services services associated with the event. For example, EC2, RDS.
A list of AWS Regions.
+ *A list of Amazon Web Services Regions.
*/ regions?: string[]; @@ -1254,7 +1253,9 @@ export interface OrganizationEventFilter { entityValues?: string[]; /** - *A list of event type category codes (issue, scheduledChange, or accountNotification).
+ *A list of event type category codes. Possible values are
+ * issue, accountNotification, or scheduledChange. Currently,
+ * the investigation value isn't supported.
The AWS service that is affected by the event, such as EC2 and RDS.
+ *The Amazon Web Services service that is affected by the event, such as EC2 and RDS.
*/ service?: string; @@ -1336,12 +1337,14 @@ export interface OrganizationEvent { eventTypeCode?: string; /** - *The category of the event type.
+ *A list of event type category codes. Possible values are
+ * issue, accountNotification, or scheduledChange. Currently,
+ * the investigation value isn't supported.
This parameter specifies if the AWS Health event is a public AWS service event or an account-specific event.
+ *This parameter specifies if the Health event is a public Amazon Web Services service event or an account-specific event.
*If the eventScopeCode value is PUBLIC, then the
@@ -1349,9 +1352,9 @@ export interface OrganizationEvent {
* If the eventScopeCode value is ACCOUNT_SPECIFIC, then
- * the affectedAccounts value lists the affected AWS accounts in your
+ * the affectedAccounts value lists the affected Amazon Web Services accounts in your
* organization. For example, if an event affects a service such as Amazon Elastic Compute Cloud and you
- * have AWS accounts that use that service, those account IDs appear in the
+ * have Amazon Web Services accounts that use that service, those account IDs appear in the
* response.
The AWS Region name of the event.
+ *The Amazon Web Services Region name of the event.
*/ region?: string; @@ -1434,13 +1437,14 @@ export interface EventTypeFilter { eventTypeCodes?: string[]; /** - *The AWS services associated with the event. For example, EC2
, RDS.
+ *The Amazon Web Services services associated with the event. For example, EC2, RDS.
- *A list of event type category codes (issue, scheduledChange,
- * or accountNotification).
+ *A list of event type category codes. Possible values are
+ * issue, accountNotification, or scheduledChange. Currently,
+ * the investigation value isn't supported.
- *Contains the metadata about a type of event that is reported by AWS Health. The
+ *Contains the metadata about a type of event that is reported by Health. The
 * EventType shows the category, service, and the event type code of the
 * event. For example, an issue might be the category, EC2 the
 * service, and AWS_EC2_SYSTEM_MAINTENANCE_EVENT the event type code.
You can use the DescribeEventTypes API operation to return this information
 * about an event.
 *You can also use the Amazon CloudWatch Events console to create a rule so that you can get notified or
- * take action when AWS Health delivers a specific event to your AWS account. For more
- * information, see Monitor for AWS Health events with Amazon CloudWatch Events in the
- * AWS Health User Guide.
+ * take action when Health delivers a specific event to your Amazon Web Services account. For more + * information, see Monitor for Health events with Amazon CloudWatch Events in the + * Health User Guide. */ export interface EventType { /** - *The AWS service that is affected by the event. For example, EC2
, RDS.
+ *The Amazon Web Services service that is affected by the event. For example, EC2, RDS.
- *A list of event type category codes (issue, scheduledChange,
- * or accountNotification).
+ *A list of event type category codes. Possible values are
+ * issue, accountNotification, or scheduledChange. Currently,
+ * the investigation value isn't supported.
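(Editorial aside, not part of the diff: a minimal sketch of passing these category codes through an `EventTypeFilter`; the service and category values are placeholders.)

```js
// Hypothetical sketch: list EC2 event types in the "issue" category using
// the EventTypeFilter fields documented above.
import { HealthClient, DescribeEventTypesCommand } from "@aws-sdk/client-health";

const client = new HealthClient({ region: "us-east-1" });
const { eventTypes } = await client.send(
  new DescribeEventTypesCommand({
    filter: { services: ["EC2"], eventTypeCategories: ["issue"] },
  })
);
// Each entry carries the category, service, and event type code, for
// example AWS_EC2_SYSTEM_MAINTENANCE_EVENT.
console.log(eventTypes);
```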
- *Information about the status of enabling or disabling AWS Health Organizational View in
+ *Information about the status of enabling or disabling the Health organizational
+ * view feature in
 * your organization.
 *Valid values are ENABLED | DISABLED | PENDING.
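(Editorial aside, not part of the diff: a minimal sketch of reading this status value, assuming the documented response shape.)

```js
// Hypothetical sketch: read the organizational view status documented above.
import {
  HealthClient,
  DescribeHealthServiceStatusForOrganizationCommand,
} from "@aws-sdk/client-health";

const client = new HealthClient({ region: "us-east-1" });
const { healthServiceAccessStatusForOrganization } = await client.send(
  new DescribeHealthServiceStatusForOrganizationCommand({})
);
console.log(healthServiceAccessStatusForOrganization); // ENABLED | DISABLED | PENDING
```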
AWS Resilience Hub helps you proactively prepare and protect your Amazon Web Services applications from
+disruptions. Resilience Hub offers continuous resiliency assessment and validation that integrates
+into your software development lifecycle. This enables you to uncover resiliency weaknesses,
+ensure recovery time objective (RTO) and recovery point objective (RPO) targets for your
+applications are met, and resolve issues before they are released into production.
+
+## Installing
+
+To install this package, simply type add or install @aws-sdk/client-resiliencehub
+using your favorite package manager:
+
+- `npm install @aws-sdk/client-resiliencehub`
+- `yarn add @aws-sdk/client-resiliencehub`
+- `pnpm add @aws-sdk/client-resiliencehub`
+
+## Getting Started
+
+### Import
+
+The AWS SDK is modularized by clients and commands.
+To send a request, you only need to import the `ResiliencehubClient` and
+the commands you need, for example `AddDraftAppVersionResourceMappingsCommand`:
+
+```js
+// ES5 example
+const { ResiliencehubClient, AddDraftAppVersionResourceMappingsCommand } = require("@aws-sdk/client-resiliencehub");
+```
+
+```ts
+// ES6+ example
+import { ResiliencehubClient, AddDraftAppVersionResourceMappingsCommand } from "@aws-sdk/client-resiliencehub";
+```
+
+### Usage
+
+To send a request, you:
+
+- Initiate client with configuration (e.g. credentials, region).
+- Initiate command with input parameters.
+- Call `send` operation on client with command object as input.
+- If you are using a custom http handler, you may call `destroy()` to close open connections.
+
+```js
+// a client can be shared by different commands.
+const client = new ResiliencehubClient({ region: "REGION" });
+
+const params = {
+  /** input parameters */
+};
+const command = new AddDraftAppVersionResourceMappingsCommand(params);
+```
+
+#### Async/await
+
+We recommend using [await](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/await)
+operator to wait for the promise returned by send operation as follows:
+
+```js
+// async/await.
+try {
+  const data = await client.send(command);
+  // process data.
+} catch (error) {
+  // error handling.
+} finally {
+  // finally.
+}
+```
+
+Async-await is clean, concise, intuitive, easy to debug and has better error handling
+as compared to using Promise chains or callbacks.
+
+#### Promises
+
+You can also use [Promise chaining](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Using_promises#chaining)
+to execute send operation.
+
+```js
+client.send(command).then(
+  (data) => {
+    // process data.
+  },
+  (error) => {
+    // error handling.
+  }
+);
+```
+
+Promises can also be called using `.catch()` and `.finally()` as follows:
+
+```js
+client
+  .send(command)
+  .then((data) => {
+    // process data.
+  })
+  .catch((error) => {
+    // error handling.
+  })
+  .finally(() => {
+    // finally.
+  });
+```
+
+#### Callbacks
+
+We do not recommend using callbacks because of [callback hell](http://callbackhell.com/),
+but they are supported by the send operation.
+
+```js
+// callbacks.
+client.send(command, (err, data) => {
+  // process err and data.
+});
+```
+
+#### v2 compatible style
+
+The client can also send requests using v2 compatible style.
+However, it results in a bigger bundle size and may be dropped in the next major version. More details in the blog post
+on [modular packages in AWS SDK for JavaScript](https://aws.amazon.com/blogs/developer/modular-packages-in-aws-sdk-for-javascript/)
+
+```ts
+import * as AWS from "@aws-sdk/client-resiliencehub";
+const client = new AWS.Resiliencehub({ region: "REGION" });
+
+// async/await.
+try {
+  const data = await client.addDraftAppVersionResourceMappings(params);
+  // process data.
+} catch (error) {
+  // error handling.
+}
+
+// Promises.
+client
+  .addDraftAppVersionResourceMappings(params)
+  .then((data) => {
+    // process data.
+  })
+  .catch((error) => {
+    // error handling.
+  });
+
+// callbacks.
+client.addDraftAppVersionResourceMappings(params, (err, data) => {
+  // process err and data.
+});
+```
+
+### Troubleshooting
+
+When the service returns an exception, the error will include the exception information,
+as well as response metadata (e.g. request id).
+
+```js
+try {
+  const data = await client.send(command);
+  // process data.
+} catch (error) {
+  const { requestId, cfId, extendedRequestId } = error.$metadata;
+  console.log({ requestId, cfId, extendedRequestId });
+  /**
+   * The keys within exceptions are also parsed.
+   * You can access them by specifying exception names:
+   * if (error.name === 'SomeServiceException') {
+   *   const value = error.specialKeyInException;
+   * }
+   */
+}
+```
+
+## Getting Help
+
+Please use these community resources for getting help.
+We use the GitHub issues for tracking bugs and feature requests, but have limited bandwidth to address them.
+
+- Visit [Developer Guide](https://docs.aws.amazon.com/sdk-for-javascript/v3/developer-guide/welcome.html)
+  or [API Reference](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/index.html).
+- Check out the blog posts tagged with [`aws-sdk-js`](https://aws.amazon.com/blogs/developer/tag/aws-sdk-js/)
+  on AWS Developer Blog.
+- Ask a question on [StackOverflow](https://stackoverflow.com/questions/tagged/aws-sdk-js) and tag it with `aws-sdk-js`.
+- Join the AWS JavaScript community on [gitter](https://gitter.im/aws/aws-sdk-js-v3).
+- If it turns out that you may have found a bug, please [open an issue](https://github.com/aws/aws-sdk-js-v3/issues/new/choose).
+
+To test your universal JavaScript code in Node.js, browser and react-native environments,
+visit our [code samples repo](https://github.com/aws-samples/aws-sdk-js-tests).
+
+## Contributing
+
+This client code is generated automatically. Any modifications will be overwritten the next time the `@aws-sdk/client-resiliencehub` package is updated.
+To contribute to client you can check our [generate clients scripts](https://github.com/aws/aws-sdk-js-v3/tree/main/scripts/generate-clients).
+
+## License
+
+This SDK is distributed under the
+[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0),
+see LICENSE for more information.
diff --git a/clients/client-resiliencehub/jest.config.js b/clients/client-resiliencehub/jest.config.js new file mode 100644 index 0000000000000..02eed352c6a86 --- /dev/null +++ b/clients/client-resiliencehub/jest.config.js @@ -0,0 +1,4 @@ +module.exports = { + preset: "ts-jest", + testMatch: ["**/*.spec.ts", "!**/*.browser.spec.ts", "!**/*.integ.spec.ts"], +}; diff --git a/clients/client-resiliencehub/package.json b/clients/client-resiliencehub/package.json new file mode 100644 index 0000000000000..59c9cea07aa5e --- /dev/null +++ b/clients/client-resiliencehub/package.json @@ -0,0 +1,96 @@ +{ + "name": "@aws-sdk/client-resiliencehub", + "description": "AWS SDK for JavaScript Resiliencehub Client for Node.js, Browser and React Native", + "version": "3.0.0", + "scripts": { + "build": "yarn build:cjs && yarn build:es && yarn build:types", + "build:cjs": "tsc -p tsconfig.json", + "build:docs": "yarn clean:docs && typedoc ./", + "build:es": "tsc -p tsconfig.es.json", + "build:types": "tsc -p tsconfig.types.json", + "clean": "yarn clean:dist && yarn clean:docs", + "clean:dist": "rimraf ./dist-*", + "clean:docs": "rimraf ./docs", + "downlevel-dts": "downlevel-dts dist-types dist-types/ts3.4", + "test": "jest --coverage --passWithNoTests" + }, + "main": "./dist-cjs/index.js", + "types": "./dist-types/index.d.ts", + "module": "./dist-es/index.js", + "sideEffects": false, + "dependencies": { + "@aws-crypto/sha256-browser": "2.0.0", + "@aws-crypto/sha256-js": "2.0.0", + "@aws-sdk/client-sts": "3.40.0", + "@aws-sdk/config-resolver": "3.40.0", + "@aws-sdk/credential-provider-node": "3.40.0", + "@aws-sdk/fetch-http-handler": "3.40.0", + "@aws-sdk/hash-node": "3.40.0", + "@aws-sdk/invalid-dependency": "3.40.0", + "@aws-sdk/middleware-content-length": "3.40.0", + "@aws-sdk/middleware-host-header": "3.40.0", + "@aws-sdk/middleware-logger": "3.40.0", + "@aws-sdk/middleware-retry": "3.40.0", + "@aws-sdk/middleware-serde": "3.40.0", + "@aws-sdk/middleware-signing": "3.40.0", + "@aws-sdk/middleware-stack": "3.40.0", + "@aws-sdk/middleware-user-agent": "3.40.0", + "@aws-sdk/node-config-provider": "3.40.0", + "@aws-sdk/node-http-handler": "3.40.0", + "@aws-sdk/protocol-http": "3.40.0", + "@aws-sdk/smithy-client": "3.40.0", + "@aws-sdk/types": "3.40.0", + "@aws-sdk/url-parser": "3.40.0", + "@aws-sdk/util-base64-browser": "3.37.0", + "@aws-sdk/util-base64-node": "3.37.0", + "@aws-sdk/util-body-length-browser": "3.37.0", + "@aws-sdk/util-body-length-node": "3.37.0", + "@aws-sdk/util-user-agent-browser": "3.40.0", + "@aws-sdk/util-user-agent-node": "3.40.0", + "@aws-sdk/util-utf8-browser": "3.37.0", + "@aws-sdk/util-utf8-node": "3.37.0", + "tslib": "^2.3.0", + "uuid": "^8.3.2" + }, + "devDependencies": { + "@aws-sdk/service-client-documentation-generator": "3.38.0", + "@types/node": "^12.7.5", + "@types/uuid": "^8.3.0", + "downlevel-dts": "0.7.0", + "jest": "^26.1.0", + "rimraf": "^3.0.0", + "ts-jest": "^26.4.1", + "typedoc": "^0.19.2", + "typescript": "~4.3.5" + }, + "engines": { + "node": ">=10.0.0" + }, + "typesVersions": { + "<4.0": { + "dist-types/*": [ + "dist-types/ts3.4/*" + ] + } + }, + "files": [ + "dist-*" + ], + "author": { + "name": "AWS SDK for JavaScript Team", + "url": "https://aws.amazon.com/javascript/" + }, + "license": "Apache-2.0", + "browser": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.browser" + }, + "react-native": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.native" + }, + "homepage": "https://github.com/aws/aws-sdk-js-v3/tree/main/clients/client-resiliencehub", + 
"repository": { + "type": "git", + "url": "https://github.com/aws/aws-sdk-js-v3.git", + "directory": "clients/client-resiliencehub" + } +} diff --git a/clients/client-resiliencehub/src/Resiliencehub.ts b/clients/client-resiliencehub/src/Resiliencehub.ts new file mode 100644 index 0000000000000..777976b4eabf0 --- /dev/null +++ b/clients/client-resiliencehub/src/Resiliencehub.ts @@ -0,0 +1,1424 @@ +import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; + +import { + AddDraftAppVersionResourceMappingsCommand, + AddDraftAppVersionResourceMappingsCommandInput, + AddDraftAppVersionResourceMappingsCommandOutput, +} from "./commands/AddDraftAppVersionResourceMappingsCommand"; +import { CreateAppCommand, CreateAppCommandInput, CreateAppCommandOutput } from "./commands/CreateAppCommand"; +import { + CreateRecommendationTemplateCommand, + CreateRecommendationTemplateCommandInput, + CreateRecommendationTemplateCommandOutput, +} from "./commands/CreateRecommendationTemplateCommand"; +import { + CreateResiliencyPolicyCommand, + CreateResiliencyPolicyCommandInput, + CreateResiliencyPolicyCommandOutput, +} from "./commands/CreateResiliencyPolicyCommand"; +import { + DeleteAppAssessmentCommand, + DeleteAppAssessmentCommandInput, + DeleteAppAssessmentCommandOutput, +} from "./commands/DeleteAppAssessmentCommand"; +import { DeleteAppCommand, DeleteAppCommandInput, DeleteAppCommandOutput } from "./commands/DeleteAppCommand"; +import { + DeleteRecommendationTemplateCommand, + DeleteRecommendationTemplateCommandInput, + DeleteRecommendationTemplateCommandOutput, +} from "./commands/DeleteRecommendationTemplateCommand"; +import { + DeleteResiliencyPolicyCommand, + DeleteResiliencyPolicyCommandInput, + DeleteResiliencyPolicyCommandOutput, +} from "./commands/DeleteResiliencyPolicyCommand"; +import { + DescribeAppAssessmentCommand, + DescribeAppAssessmentCommandInput, + DescribeAppAssessmentCommandOutput, +} from "./commands/DescribeAppAssessmentCommand"; +import { DescribeAppCommand, DescribeAppCommandInput, DescribeAppCommandOutput } from "./commands/DescribeAppCommand"; +import { + DescribeAppVersionResourcesResolutionStatusCommand, + DescribeAppVersionResourcesResolutionStatusCommandInput, + DescribeAppVersionResourcesResolutionStatusCommandOutput, +} from "./commands/DescribeAppVersionResourcesResolutionStatusCommand"; +import { + DescribeAppVersionTemplateCommand, + DescribeAppVersionTemplateCommandInput, + DescribeAppVersionTemplateCommandOutput, +} from "./commands/DescribeAppVersionTemplateCommand"; +import { + DescribeDraftAppVersionResourcesImportStatusCommand, + DescribeDraftAppVersionResourcesImportStatusCommandInput, + DescribeDraftAppVersionResourcesImportStatusCommandOutput, +} from "./commands/DescribeDraftAppVersionResourcesImportStatusCommand"; +import { + DescribeResiliencyPolicyCommand, + DescribeResiliencyPolicyCommandInput, + DescribeResiliencyPolicyCommandOutput, +} from "./commands/DescribeResiliencyPolicyCommand"; +import { + ImportResourcesToDraftAppVersionCommand, + ImportResourcesToDraftAppVersionCommandInput, + ImportResourcesToDraftAppVersionCommandOutput, +} from "./commands/ImportResourcesToDraftAppVersionCommand"; +import { + ListAlarmRecommendationsCommand, + ListAlarmRecommendationsCommandInput, + ListAlarmRecommendationsCommandOutput, +} from "./commands/ListAlarmRecommendationsCommand"; +import { + ListAppAssessmentsCommand, + ListAppAssessmentsCommandInput, + ListAppAssessmentsCommandOutput, +} from "./commands/ListAppAssessmentsCommand"; +import { + 
ListAppComponentCompliancesCommand, + ListAppComponentCompliancesCommandInput, + ListAppComponentCompliancesCommandOutput, +} from "./commands/ListAppComponentCompliancesCommand"; +import { + ListAppComponentRecommendationsCommand, + ListAppComponentRecommendationsCommandInput, + ListAppComponentRecommendationsCommandOutput, +} from "./commands/ListAppComponentRecommendationsCommand"; +import { ListAppsCommand, ListAppsCommandInput, ListAppsCommandOutput } from "./commands/ListAppsCommand"; +import { + ListAppVersionResourceMappingsCommand, + ListAppVersionResourceMappingsCommandInput, + ListAppVersionResourceMappingsCommandOutput, +} from "./commands/ListAppVersionResourceMappingsCommand"; +import { + ListAppVersionResourcesCommand, + ListAppVersionResourcesCommandInput, + ListAppVersionResourcesCommandOutput, +} from "./commands/ListAppVersionResourcesCommand"; +import { + ListAppVersionsCommand, + ListAppVersionsCommandInput, + ListAppVersionsCommandOutput, +} from "./commands/ListAppVersionsCommand"; +import { + ListRecommendationTemplatesCommand, + ListRecommendationTemplatesCommandInput, + ListRecommendationTemplatesCommandOutput, +} from "./commands/ListRecommendationTemplatesCommand"; +import { + ListResiliencyPoliciesCommand, + ListResiliencyPoliciesCommandInput, + ListResiliencyPoliciesCommandOutput, +} from "./commands/ListResiliencyPoliciesCommand"; +import { + ListSopRecommendationsCommand, + ListSopRecommendationsCommandInput, + ListSopRecommendationsCommandOutput, +} from "./commands/ListSopRecommendationsCommand"; +import { + ListSuggestedResiliencyPoliciesCommand, + ListSuggestedResiliencyPoliciesCommandInput, + ListSuggestedResiliencyPoliciesCommandOutput, +} from "./commands/ListSuggestedResiliencyPoliciesCommand"; +import { + ListTagsForResourceCommand, + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "./commands/ListTagsForResourceCommand"; +import { + ListTestRecommendationsCommand, + ListTestRecommendationsCommandInput, + ListTestRecommendationsCommandOutput, +} from "./commands/ListTestRecommendationsCommand"; +import { + ListUnsupportedAppVersionResourcesCommand, + ListUnsupportedAppVersionResourcesCommandInput, + ListUnsupportedAppVersionResourcesCommandOutput, +} from "./commands/ListUnsupportedAppVersionResourcesCommand"; +import { + PublishAppVersionCommand, + PublishAppVersionCommandInput, + PublishAppVersionCommandOutput, +} from "./commands/PublishAppVersionCommand"; +import { + PutDraftAppVersionTemplateCommand, + PutDraftAppVersionTemplateCommandInput, + PutDraftAppVersionTemplateCommandOutput, +} from "./commands/PutDraftAppVersionTemplateCommand"; +import { + RemoveDraftAppVersionResourceMappingsCommand, + RemoveDraftAppVersionResourceMappingsCommandInput, + RemoveDraftAppVersionResourceMappingsCommandOutput, +} from "./commands/RemoveDraftAppVersionResourceMappingsCommand"; +import { + ResolveAppVersionResourcesCommand, + ResolveAppVersionResourcesCommandInput, + ResolveAppVersionResourcesCommandOutput, +} from "./commands/ResolveAppVersionResourcesCommand"; +import { + StartAppAssessmentCommand, + StartAppAssessmentCommandInput, + StartAppAssessmentCommandOutput, +} from "./commands/StartAppAssessmentCommand"; +import { TagResourceCommand, TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; +import { + UntagResourceCommand, + UntagResourceCommandInput, + UntagResourceCommandOutput, +} from "./commands/UntagResourceCommand"; +import { UpdateAppCommand, UpdateAppCommandInput, 
UpdateAppCommandOutput } from "./commands/UpdateAppCommand"; +import { + UpdateResiliencyPolicyCommand, + UpdateResiliencyPolicyCommandInput, + UpdateResiliencyPolicyCommandOutput, +} from "./commands/UpdateResiliencyPolicyCommand"; +import { ResiliencehubClient } from "./ResiliencehubClient"; + +/** + *AWS Resilience Hub helps you proactively prepare and protect your Amazon Web Services applications from + * disruptions. Resilience Hub offers continuous resiliency assessment and validation that integrates + * into your software development lifecycle. This enables you to uncover resiliency weaknesses, + * ensure recovery time objective (RTO) and recovery point objective (RPO) targets for your + * applications are met, and resolve issues before they are released into production.
+ */ +export class Resiliencehub extends ResiliencehubClient { + /** + *Adds the resource mapping for the draft application version.
+ */ + public addDraftAppVersionResourceMappings( + args: AddDraftAppVersionResourceMappingsCommandInput, + options?: __HttpHandlerOptions + ): PromiseCreates a Resilience Hub application. A Resilience Hub application is a collection of Amazon Web Services + * resources structured to prevent and recover Amazon Web Services application disruptions. To describe a + * Resilience Hub application, you provide an application name, resources from one or more–up to + * five–CloudFormation stacks, and an appropriate resiliency policy.
+ * + *After you create a Resilience Hub application, you publish it so that you can run a resiliency + * assessment on it. You can then use recommendations from the assessment to improve resiliency + * by running another assessment, comparing results, and then iterating the process until you + * achieve your goals for recovery time objective (RTO) and recovery point objective + * (RPO).
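(Editorial aside, not part of the diff: a minimal sketch of the create, publish, and assess flow described above, assuming the documented command shapes; the app name, policy ARN, and region are placeholders.)

```js
// Hypothetical sketch: create an app, publish the draft version, then start
// an assessment against the published version.
import {
  ResiliencehubClient,
  CreateAppCommand,
  PublishAppVersionCommand,
  StartAppAssessmentCommand,
} from "@aws-sdk/client-resiliencehub";

const client = new ResiliencehubClient({ region: "us-east-1" });
const { app } = await client.send(
  new CreateAppCommand({
    name: "my-app",
    policyArn:
      "arn:aws:resiliencehub:us-east-1:123456789012:resiliency-policy/EXAMPLE",
  })
);
// Publish the draft so an assessment can run against a released version.
const { appVersion } = await client.send(
  new PublishAppVersionCommand({ appArn: app.appArn })
);
await client.send(
  new StartAppAssessmentCommand({
    appArn: app.appArn,
    appVersion,
    assessmentName: "initial-assessment",
  })
);
```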
+ */ + public createApp(args: CreateAppCommandInput, options?: __HttpHandlerOptions): PromiseCreates a new recommendation template.
+ */ + public createRecommendationTemplate( + args: CreateRecommendationTemplateCommandInput, + options?: __HttpHandlerOptions + ): PromiseCreates a resiliency policy for an application.
+ */ + public createResiliencyPolicy( + args: CreateResiliencyPolicyCommandInput, + options?: __HttpHandlerOptions + ): PromiseDeletes an AWS Resilience Hub application. This is a destructive action that can't be + * undone.
+ */ + public deleteApp(args: DeleteAppCommandInput, options?: __HttpHandlerOptions): PromiseDeletes an AWS Resilience Hub application assessment. This is a destructive action that can't + * be undone.
+ */ + public deleteAppAssessment( + args: DeleteAppAssessmentCommandInput, + options?: __HttpHandlerOptions + ): PromiseDeletes a recommendation template. This is a destructive action that can't be + * undone.
+ */ + public deleteRecommendationTemplate( + args: DeleteRecommendationTemplateCommandInput, + options?: __HttpHandlerOptions + ): PromiseDeletes a resiliency policy. This is a destructive action that can't be undone.
+ */ + public deleteResiliencyPolicy( + args: DeleteResiliencyPolicyCommandInput, + options?: __HttpHandlerOptions + ): PromiseDescribes an AWS Resilience Hub application.
+ */ + public describeApp(args: DescribeAppCommandInput, options?: __HttpHandlerOptions): PromiseDescribes an assessment for an AWS Resilience Hub application.
+ */ + public describeAppAssessment( + args: DescribeAppAssessmentCommandInput, + options?: __HttpHandlerOptions + ): PromiseReturns the resolution status for the specified resolution identifier for an application
+ * version. If resolutionId is not specified, the current resolution status is
+ * returned.
Describes details about an AWS Resilience Hub application version template.
+ */ + public describeAppVersionTemplate( + args: DescribeAppVersionTemplateCommandInput, + options?: __HttpHandlerOptions + ): PromiseDescribes the status of importing resources to an application version.
+ */ + public describeDraftAppVersionResourcesImportStatus( + args: DescribeDraftAppVersionResourcesImportStatusCommandInput, + options?: __HttpHandlerOptions + ): PromiseDescribes a specified resiliency policy for an AWS Resilience Hub application. The returned + * policy object includes creation time, data location constraints, the Amazon Resource Name + * (ARN) for the policy, tags, tier, and more.
+ */ + public describeResiliencyPolicy( + args: DescribeResiliencyPolicyCommandInput, + options?: __HttpHandlerOptions + ): PromiseImports resources from sources such as a CloudFormation stack, resource-groups, or application + * registry app to a draft application version.
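(Editorial aside, not part of the diff: a minimal sketch of the import flow described above, assuming the documented command shapes; the ARNs and region are placeholders.)

```js
// Hypothetical sketch: import resources from a CloudFormation stack into the
// draft version, then poll the import status.
import {
  ResiliencehubClient,
  ImportResourcesToDraftAppVersionCommand,
  DescribeDraftAppVersionResourcesImportStatusCommand,
} from "@aws-sdk/client-resiliencehub";

const client = new ResiliencehubClient({ region: "us-east-1" });
const appArn = "arn:aws:resiliencehub:us-east-1:123456789012:app/EXAMPLE";
await client.send(
  new ImportResourcesToDraftAppVersionCommand({
    appArn,
    sourceArns: [
      "arn:aws:cloudformation:us-east-1:123456789012:stack/my-stack/EXAMPLE",
    ],
  })
);
const { status } = await client.send(
  new DescribeDraftAppVersionResourcesImportStatusCommand({ appArn })
);
console.log(status); // Pending | InProgress | Failed | Success
```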
+ */ + public importResourcesToDraftAppVersion( + args: ImportResourcesToDraftAppVersionCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists the alarm recommendations for a AWS Resilience Hub application.
+ */ + public listAlarmRecommendations( + args: ListAlarmRecommendationsCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists the assessments for an AWS Resilience Hub application. You can use request parameters to + * refine the results for the response object.
+ */ + public listAppAssessments( + args: ListAppAssessmentsCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists the compliances for an AWS Resilience Hub component.
+ */ + public listAppComponentCompliances( + args: ListAppComponentCompliancesCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists the recommendations for an AWS Resilience Hub component.
+ */ + public listAppComponentRecommendations( + args: ListAppComponentRecommendationsCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists your Resilience Hub applications.
+ */ + public listApps(args: ListAppsCommandInput, options?: __HttpHandlerOptions): PromiseLists how the resources in an application version are mapped/sourced from. Mappings can be + * physical resource identifiers, CloudFormation stacks, resource-groups, or an application registry + * app.
+ */ + public listAppVersionResourceMappings( + args: ListAppVersionResourceMappingsCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists all the resources in an application version.
+ */ + public listAppVersionResources( + args: ListAppVersionResourcesCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists the different versions for the Resilience Hub applications.
+ */ + public listAppVersions( + args: ListAppVersionsCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists the recommendation templates for the Resilience Hub applications.
+ */ + public listRecommendationTemplates( + args: ListRecommendationTemplatesCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists the resiliency policies for the Resilience Hub applications.
+ */ + public listResiliencyPolicies( + args: ListResiliencyPoliciesCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists the standard operating procedure (SOP) recommendations for the Resilience Hub + * applications.
+ */ + public listSopRecommendations( + args: ListSopRecommendationsCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists the suggested resiliency policies for the Resilience Hub applications.
+ */ + public listSuggestedResiliencyPolicies( + args: ListSuggestedResiliencyPoliciesCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists the tags for your resources in your Resilience Hub applications.
+ */ + public listTagsForResource( + args: ListTagsForResourceCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists the test recommendations for the Resilience Hub application.
+ */ + public listTestRecommendations( + args: ListTestRecommendationsCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists the resources that are not currently supported in AWS Resilience Hub. An unsupported + * resource is a resource that exists in the object that was used to create an app, but is not + * supported by Resilience Hub.
+ */ + public listUnsupportedAppVersionResources( + args: ListUnsupportedAppVersionResourcesCommandInput, + options?: __HttpHandlerOptions + ): PromisePublishes a new version of a specific Resilience Hub application.
+ */ + public publishAppVersion( + args: PublishAppVersionCommandInput, + options?: __HttpHandlerOptions + ): PromiseAdds or updates the app template for a draft version of a Resilience Hub app.
+ */ + public putDraftAppVersionTemplate( + args: PutDraftAppVersionTemplateCommandInput, + options?: __HttpHandlerOptions + ): PromiseRemoves resource mappings from a draft application version.
+ */ + public removeDraftAppVersionResourceMappings( + args: RemoveDraftAppVersionResourceMappingsCommandInput, + options?: __HttpHandlerOptions + ): PromiseResolves the resources for an application version.
+ */ + public resolveAppVersionResources( + args: ResolveAppVersionResourcesCommandInput, + options?: __HttpHandlerOptions + ): PromiseCreates a new application assessment for an application.
+ */ + public startAppAssessment( + args: StartAppAssessmentCommandInput, + options?: __HttpHandlerOptions + ): PromiseApplies one or more tags to a resource.
+ */ + public tagResource(args: TagResourceCommandInput, options?: __HttpHandlerOptions): PromiseRemoves one or more tags from a resource.
+ */ + public untagResource( + args: UntagResourceCommandInput, + options?: __HttpHandlerOptions + ): PromiseUpdates an application.
+ */ + public updateApp(args: UpdateAppCommandInput, options?: __HttpHandlerOptions): PromiseUpdates a resiliency policy.
+ */ + public updateResiliencyPolicy( + args: UpdateResiliencyPolicyCommandInput, + options?: __HttpHandlerOptions + ): PromiseAWS Resilience Hub helps you proactively prepare and protect your Amazon Web Services applications from + * disruptions. Resilience Hub offers continuous resiliency assessment and validation that integrates + * into your software development lifecycle. This enables you to uncover resiliency weaknesses, + * ensure recovery time objective (RTO) and recovery point objective (RPO) targets for your + * applications are met, and resolve issues before they are released into production.
+ */ +export class ResiliencehubClient extends __Client< + __HttpHandlerOptions, + ServiceInputTypes, + ServiceOutputTypes, + ResiliencehubClientResolvedConfig +> { + /** + * The resolved configuration of ResiliencehubClient class. This is resolved and normalized from the {@link ResiliencehubClientConfig | constructor configuration interface}. + */ + readonly config: ResiliencehubClientResolvedConfig; + + constructor(configuration: ResiliencehubClientConfig) { + const _config_0 = __getRuntimeConfig(configuration); + const _config_1 = resolveRegionConfig(_config_0); + const _config_2 = resolveEndpointsConfig(_config_1); + const _config_3 = resolveRetryConfig(_config_2); + const _config_4 = resolveHostHeaderConfig(_config_3); + const _config_5 = resolveAwsAuthConfig(_config_4); + const _config_6 = resolveUserAgentConfig(_config_5); + super(_config_6); + this.config = _config_6; + this.middlewareStack.use(getRetryPlugin(this.config)); + this.middlewareStack.use(getContentLengthPlugin(this.config)); + this.middlewareStack.use(getHostHeaderPlugin(this.config)); + this.middlewareStack.use(getLoggerPlugin(this.config)); + this.middlewareStack.use(getAwsAuthPlugin(this.config)); + this.middlewareStack.use(getUserAgentPlugin(this.config)); + } + + /** + * Destroy underlying resources, like sockets. It's usually not necessary to do this. + * However in Node.js, it's best to explicitly shut down the client's agent when it is no longer needed. + * Otherwise, sockets might stay open for quite a long time before the server terminates them. + */ + destroy(): void { + super.destroy(); + } +} diff --git a/clients/client-resiliencehub/src/commands/AddDraftAppVersionResourceMappingsCommand.ts b/clients/client-resiliencehub/src/commands/AddDraftAppVersionResourceMappingsCommand.ts new file mode 100644 index 0000000000000..f5841aa60c9d8 --- /dev/null +++ b/clients/client-resiliencehub/src/commands/AddDraftAppVersionResourceMappingsCommand.ts @@ -0,0 +1,106 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { + AddDraftAppVersionResourceMappingsRequest, + AddDraftAppVersionResourceMappingsResponse, +} from "../models/models_0"; +import { + deserializeAws_restJson1AddDraftAppVersionResourceMappingsCommand, + serializeAws_restJson1AddDraftAppVersionResourceMappingsCommand, +} from "../protocols/Aws_restJson1"; +import { ResiliencehubClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ResiliencehubClient"; + +export interface AddDraftAppVersionResourceMappingsCommandInput extends AddDraftAppVersionResourceMappingsRequest {} +export interface AddDraftAppVersionResourceMappingsCommandOutput + extends AddDraftAppVersionResourceMappingsResponse, + __MetadataBearer {} + +/** + *Adds the resource mapping for the draft application version.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ResiliencehubClient, AddDraftAppVersionResourceMappingsCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import + * // const { ResiliencehubClient, AddDraftAppVersionResourceMappingsCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import + * const client = new ResiliencehubClient(config); + * const command = new AddDraftAppVersionResourceMappingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link AddDraftAppVersionResourceMappingsCommandInput} for command's `input` shape. + * @see {@link AddDraftAppVersionResourceMappingsCommandOutput} for command's `response` shape. + * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape. + * + */ +export class AddDraftAppVersionResourceMappingsCommand extends $Command< + AddDraftAppVersionResourceMappingsCommandInput, + AddDraftAppVersionResourceMappingsCommandOutput, + ResiliencehubClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: AddDraftAppVersionResourceMappingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackCreates a Resilience Hub application. A Resilience Hub application is a collection of Amazon Web Services + * resources structured to prevent and recover Amazon Web Services application disruptions. To describe a + * Resilience Hub application, you provide an application name, resources from one or more–up to + * five–CloudFormation stacks, and an appropriate resiliency policy.
+ * + *After you create a Resilience Hub application, you publish it so that you can run a resiliency + * assessment on it. You can then use recommendations from the assessment to improve resiliency + * by running another assessment, comparing results, and then iterating the process until you + * achieve your goals for recovery time objective (RTO) and recovery point objective + * (RPO).
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ResiliencehubClient, CreateAppCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import + * // const { ResiliencehubClient, CreateAppCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import + * const client = new ResiliencehubClient(config); + * const command = new CreateAppCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateAppCommandInput} for command's `input` shape. + * @see {@link CreateAppCommandOutput} for command's `response` shape. + * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape. + * + */ +export class CreateAppCommand extends $Command< + CreateAppCommandInput, + CreateAppCommandOutput, + ResiliencehubClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateAppCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackCreates a new recommendation template.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ResiliencehubClient, CreateRecommendationTemplateCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import + * // const { ResiliencehubClient, CreateRecommendationTemplateCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import + * const client = new ResiliencehubClient(config); + * const command = new CreateRecommendationTemplateCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateRecommendationTemplateCommandInput} for command's `input` shape. + * @see {@link CreateRecommendationTemplateCommandOutput} for command's `response` shape. + * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape. + * + */ +export class CreateRecommendationTemplateCommand extends $Command< + CreateRecommendationTemplateCommandInput, + CreateRecommendationTemplateCommandOutput, + ResiliencehubClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateRecommendationTemplateCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackCreates a resiliency policy for an application.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ResiliencehubClient, CreateResiliencyPolicyCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import + * // const { ResiliencehubClient, CreateResiliencyPolicyCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import + * const client = new ResiliencehubClient(config); + * const command = new CreateResiliencyPolicyCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateResiliencyPolicyCommandInput} for command's `input` shape. + * @see {@link CreateResiliencyPolicyCommandOutput} for command's `response` shape. + * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape. + * + */ +export class CreateResiliencyPolicyCommand extends $Command< + CreateResiliencyPolicyCommandInput, + CreateResiliencyPolicyCommandOutput, + ResiliencehubClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateResiliencyPolicyCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackDeletes an AWS Resilience Hub application assessment. This is a destructive action that can't + * be undone.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ResiliencehubClient, DeleteAppAssessmentCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import + * // const { ResiliencehubClient, DeleteAppAssessmentCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import + * const client = new ResiliencehubClient(config); + * const command = new DeleteAppAssessmentCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteAppAssessmentCommandInput} for command's `input` shape. + * @see {@link DeleteAppAssessmentCommandOutput} for command's `response` shape. + * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape. + * + */ +export class DeleteAppAssessmentCommand extends $Command< + DeleteAppAssessmentCommandInput, + DeleteAppAssessmentCommandOutput, + ResiliencehubClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteAppAssessmentCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackDeletes an AWS Resilience Hub application. This is a destructive action that can't be + * undone.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ResiliencehubClient, DeleteAppCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import + * // const { ResiliencehubClient, DeleteAppCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import + * const client = new ResiliencehubClient(config); + * const command = new DeleteAppCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteAppCommandInput} for command's `input` shape. + * @see {@link DeleteAppCommandOutput} for command's `response` shape. + * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape. + * + */ +export class DeleteAppCommand extends $Command< + DeleteAppCommandInput, + DeleteAppCommandOutput, + ResiliencehubClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteAppCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackDeletes a recommendation template. This is a destructive action that can't be + * undone.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ResiliencehubClient, DeleteRecommendationTemplateCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import + * // const { ResiliencehubClient, DeleteRecommendationTemplateCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import + * const client = new ResiliencehubClient(config); + * const command = new DeleteRecommendationTemplateCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteRecommendationTemplateCommandInput} for command's `input` shape. + * @see {@link DeleteRecommendationTemplateCommandOutput} for command's `response` shape. + * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape. + * + */ +export class DeleteRecommendationTemplateCommand extends $Command< + DeleteRecommendationTemplateCommandInput, + DeleteRecommendationTemplateCommandOutput, + ResiliencehubClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteRecommendationTemplateCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackDeletes a resiliency policy. This is a destructive action that can't be undone.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ResiliencehubClient, DeleteResiliencyPolicyCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import + * // const { ResiliencehubClient, DeleteResiliencyPolicyCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import + * const client = new ResiliencehubClient(config); + * const command = new DeleteResiliencyPolicyCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteResiliencyPolicyCommandInput} for command's `input` shape. + * @see {@link DeleteResiliencyPolicyCommandOutput} for command's `response` shape. + * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape. + * + */ +export class DeleteResiliencyPolicyCommand extends $Command< + DeleteResiliencyPolicyCommandInput, + DeleteResiliencyPolicyCommandOutput, + ResiliencehubClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteResiliencyPolicyCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackDescribes an assessment for an AWS Resilience Hub application.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ResiliencehubClient, DescribeAppAssessmentCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import + * // const { ResiliencehubClient, DescribeAppAssessmentCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import + * const client = new ResiliencehubClient(config); + * const command = new DescribeAppAssessmentCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeAppAssessmentCommandInput} for command's `input` shape. + * @see {@link DescribeAppAssessmentCommandOutput} for command's `response` shape. + * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape. + * + */ +export class DescribeAppAssessmentCommand extends $Command< + DescribeAppAssessmentCommandInput, + DescribeAppAssessmentCommandOutput, + ResiliencehubClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeAppAssessmentCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackDescribes an AWS Resilience Hub application.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ResiliencehubClient, DescribeAppCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import + * // const { ResiliencehubClient, DescribeAppCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import + * const client = new ResiliencehubClient(config); + * const command = new DescribeAppCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeAppCommandInput} for command's `input` shape. + * @see {@link DescribeAppCommandOutput} for command's `response` shape. + * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape. + * + */ +export class DescribeAppCommand extends $Command< + DescribeAppCommandInput, + DescribeAppCommandOutput, + ResiliencehubClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeAppCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackReturns the resolution status for the specified resolution identifier for an application
+ * version. If resolutionId
is not specified, the current resolution status is
+ * returned.
Describes details about an AWS Resilience Hub
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ResiliencehubClient, DescribeAppVersionTemplateCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import + * // const { ResiliencehubClient, DescribeAppVersionTemplateCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import + * const client = new ResiliencehubClient(config); + * const command = new DescribeAppVersionTemplateCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeAppVersionTemplateCommandInput} for command's `input` shape. + * @see {@link DescribeAppVersionTemplateCommandOutput} for command's `response` shape. + * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape. + * + */ +export class DescribeAppVersionTemplateCommand extends $Command< + DescribeAppVersionTemplateCommandInput, + DescribeAppVersionTemplateCommandOutput, + ResiliencehubClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeAppVersionTemplateCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackDescribes the status of importing resources to an application version.
+ * Describes the status of importing resources to an application version.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { ResiliencehubClient, DescribeDraftAppVersionResourcesImportStatusCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import
+ * // const { ResiliencehubClient, DescribeDraftAppVersionResourcesImportStatusCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import
+ * const client = new ResiliencehubClient(config);
+ * const command = new DescribeDraftAppVersionResourcesImportStatusCommand(input);
+ * const response = await client.send(command);
+ * ```
+ *
+ * @see {@link DescribeDraftAppVersionResourcesImportStatusCommandInput} for command's `input` shape.
+ * @see {@link DescribeDraftAppVersionResourcesImportStatusCommandOutput} for command's `response` shape.
+ * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape.
+ *
+ */
+export class DescribeDraftAppVersionResourcesImportStatusCommand extends $Command<
+  DescribeDraftAppVersionResourcesImportStatusCommandInput,
+  DescribeDraftAppVersionResourcesImportStatusCommandOutput,
+  ResiliencehubClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: DescribeDraftAppVersionResourcesImportStatusCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
+ * Describes a specified resiliency policy for an AWS Resilience Hub application. The returned
+ * policy object includes creation time, data location constraints, the Amazon Resource Name
+ * (ARN) for the policy, tags, tier, and more.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { ResiliencehubClient, DescribeResiliencyPolicyCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import
+ * // const { ResiliencehubClient, DescribeResiliencyPolicyCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import
+ * const client = new ResiliencehubClient(config);
+ * const command = new DescribeResiliencyPolicyCommand(input);
+ * const response = await client.send(command);
+ * ```
+ *
+ * @see {@link DescribeResiliencyPolicyCommandInput} for command's `input` shape.
+ * @see {@link DescribeResiliencyPolicyCommandOutput} for command's `response` shape.
+ * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape.
+ *
+ */
+export class DescribeResiliencyPolicyCommand extends $Command<
+  DescribeResiliencyPolicyCommandInput,
+  DescribeResiliencyPolicyCommandOutput,
+  ResiliencehubClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: DescribeResiliencyPolicyCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
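A minimal sketch of reading the returned policy metadata follows; the policy ARN and region are placeholders, and the `policy` field names follow the `ResiliencyPolicy` shape as I read it.

```typescript
import { ResiliencehubClient, DescribeResiliencyPolicyCommand } from "@aws-sdk/client-resiliencehub";

const client = new ResiliencehubClient({ region: "us-east-1" });

const { policy } = await client.send(
  new DescribeResiliencyPolicyCommand({
    policyArn: "arn:aws:resiliencehub:us-east-1:123456789012:resiliency-policy/example-id", // placeholder
  })
);
// The policy object carries the metadata described above.
console.log(policy?.policyName, policy?.tier, policy?.creationTime);
```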
+ * Imports resources from sources such as a CloudFormation stack, resource-groups, or application
+ * registry app to a draft application version.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { ResiliencehubClient, ImportResourcesToDraftAppVersionCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import
+ * // const { ResiliencehubClient, ImportResourcesToDraftAppVersionCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import
+ * const client = new ResiliencehubClient(config);
+ * const command = new ImportResourcesToDraftAppVersionCommand(input);
+ * const response = await client.send(command);
+ * ```
+ *
+ * @see {@link ImportResourcesToDraftAppVersionCommandInput} for command's `input` shape.
+ * @see {@link ImportResourcesToDraftAppVersionCommandOutput} for command's `response` shape.
+ * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape.
+ *
+ */
+export class ImportResourcesToDraftAppVersionCommand extends $Command<
+  ImportResourcesToDraftAppVersionCommandInput,
+  ImportResourcesToDraftAppVersionCommandOutput,
+  ResiliencehubClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: ImportResourcesToDraftAppVersionCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
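A minimal import sketch follows; both ARNs and the region are placeholders, and the `sourceArns` field name follows `ImportResourcesToDraftAppVersionCommandInput` as I read it.

```typescript
import {
  ResiliencehubClient,
  ImportResourcesToDraftAppVersionCommand,
} from "@aws-sdk/client-resiliencehub";

const client = new ResiliencehubClient({ region: "us-east-1" });

// `sourceArns` may point at CloudFormation stacks, resource groups, or
// AppRegistry applications; both ARNs below are placeholders.
const response = await client.send(
  new ImportResourcesToDraftAppVersionCommand({
    appArn: "arn:aws:resiliencehub:us-east-1:123456789012:app/example-id",
    sourceArns: ["arn:aws:cloudformation:us-east-1:123456789012:stack/my-stack/example-id"],
  })
);
// The import runs asynchronously; poll DescribeDraftAppVersionResourcesImportStatus
// (shown above) until the reported status is terminal.
console.log(response.status);
```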
+ * Lists the alarm recommendations for an AWS Resilience Hub application.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { ResiliencehubClient, ListAlarmRecommendationsCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import
+ * // const { ResiliencehubClient, ListAlarmRecommendationsCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import
+ * const client = new ResiliencehubClient(config);
+ * const command = new ListAlarmRecommendationsCommand(input);
+ * const response = await client.send(command);
+ * ```
+ *
+ * @see {@link ListAlarmRecommendationsCommandInput} for command's `input` shape.
+ * @see {@link ListAlarmRecommendationsCommandOutput} for command's `response` shape.
+ * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape.
+ *
+ */
+export class ListAlarmRecommendationsCommand extends $Command<
+  ListAlarmRecommendationsCommandInput,
+  ListAlarmRecommendationsCommandOutput,
+  ResiliencehubClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: ListAlarmRecommendationsCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
+ * Lists the assessments for an AWS Resilience Hub application. You can use request parameters to
+ * refine the results for the response object.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { ResiliencehubClient, ListAppAssessmentsCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import
+ * // const { ResiliencehubClient, ListAppAssessmentsCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import
+ * const client = new ResiliencehubClient(config);
+ * const command = new ListAppAssessmentsCommand(input);
+ * const response = await client.send(command);
+ * ```
+ *
+ * @see {@link ListAppAssessmentsCommandInput} for command's `input` shape.
+ * @see {@link ListAppAssessmentsCommandOutput} for command's `response` shape.
+ * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape.
+ *
+ */
+export class ListAppAssessmentsCommand extends $Command<
+  ListAppAssessmentsCommandInput,
+  ListAppAssessmentsCommandOutput,
+  ResiliencehubClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: ListAppAssessmentsCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
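A sketch of refining the results with request parameters follows. The filter field names and the "Success" status value follow the `ListAppAssessmentsCommandInput` shape as I read it; treat them as assumptions and check that shape before use. ARN and region are placeholders.

```typescript
import { ResiliencehubClient, ListAppAssessmentsCommand } from "@aws-sdk/client-resiliencehub";

const client = new ResiliencehubClient({ region: "us-east-1" });

// All filter fields are optional; omitting them lists every assessment.
const response = await client.send(
  new ListAppAssessmentsCommand({
    appArn: "arn:aws:resiliencehub:us-east-1:123456789012:app/example-id", // placeholder
    assessmentStatus: ["Success"], // assumed enum value
    maxResults: 10,
  })
);
for (const summary of response.assessmentSummaries ?? []) {
  console.log(summary.assessmentName, summary.complianceStatus);
}
```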
+ * Lists the compliances for an AWS Resilience Hub component.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { ResiliencehubClient, ListAppComponentCompliancesCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import
+ * // const { ResiliencehubClient, ListAppComponentCompliancesCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import
+ * const client = new ResiliencehubClient(config);
+ * const command = new ListAppComponentCompliancesCommand(input);
+ * const response = await client.send(command);
+ * ```
+ *
+ * @see {@link ListAppComponentCompliancesCommandInput} for command's `input` shape.
+ * @see {@link ListAppComponentCompliancesCommandOutput} for command's `response` shape.
+ * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape.
+ *
+ */
+export class ListAppComponentCompliancesCommand extends $Command<
+  ListAppComponentCompliancesCommandInput,
+  ListAppComponentCompliancesCommandOutput,
+  ResiliencehubClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: ListAppComponentCompliancesCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
+ * Lists the recommendations for an AWS Resilience Hub component.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { ResiliencehubClient, ListAppComponentRecommendationsCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import
+ * // const { ResiliencehubClient, ListAppComponentRecommendationsCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import
+ * const client = new ResiliencehubClient(config);
+ * const command = new ListAppComponentRecommendationsCommand(input);
+ * const response = await client.send(command);
+ * ```
+ *
+ * @see {@link ListAppComponentRecommendationsCommandInput} for command's `input` shape.
+ * @see {@link ListAppComponentRecommendationsCommandOutput} for command's `response` shape.
+ * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape.
+ *
+ */
+export class ListAppComponentRecommendationsCommand extends $Command<
+  ListAppComponentRecommendationsCommandInput,
+  ListAppComponentRecommendationsCommandOutput,
+  ResiliencehubClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: ListAppComponentRecommendationsCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
+ * Lists how the resources in an application version are mapped and sourced. Mappings can be
+ * physical resource identifiers, CloudFormation stacks, resource-groups, or an application registry
+ * app.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { ResiliencehubClient, ListAppVersionResourceMappingsCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import
+ * // const { ResiliencehubClient, ListAppVersionResourceMappingsCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import
+ * const client = new ResiliencehubClient(config);
+ * const command = new ListAppVersionResourceMappingsCommand(input);
+ * const response = await client.send(command);
+ * ```
+ *
+ * @see {@link ListAppVersionResourceMappingsCommandInput} for command's `input` shape.
+ * @see {@link ListAppVersionResourceMappingsCommandOutput} for command's `response` shape.
+ * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape.
+ *
+ */
+export class ListAppVersionResourceMappingsCommand extends $Command<
+  ListAppVersionResourceMappingsCommandInput,
+  ListAppVersionResourceMappingsCommandOutput,
+  ResiliencehubClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: ListAppVersionResourceMappingsCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
+ * Lists all the resources in an application version.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { ResiliencehubClient, ListAppVersionResourcesCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import
+ * // const { ResiliencehubClient, ListAppVersionResourcesCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import
+ * const client = new ResiliencehubClient(config);
+ * const command = new ListAppVersionResourcesCommand(input);
+ * const response = await client.send(command);
+ * ```
+ *
+ * @see {@link ListAppVersionResourcesCommandInput} for command's `input` shape.
+ * @see {@link ListAppVersionResourcesCommandOutput} for command's `response` shape.
+ * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape.
+ *
+ */
+export class ListAppVersionResourcesCommand extends $Command<
+  ListAppVersionResourcesCommandInput,
+  ListAppVersionResourcesCommandOutput,
+  ResiliencehubClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: ListAppVersionResourcesCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
+ * Lists the different versions for the Resilience Hub applications.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { ResiliencehubClient, ListAppVersionsCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import
+ * // const { ResiliencehubClient, ListAppVersionsCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import
+ * const client = new ResiliencehubClient(config);
+ * const command = new ListAppVersionsCommand(input);
+ * const response = await client.send(command);
+ * ```
+ *
+ * @see {@link ListAppVersionsCommandInput} for command's `input` shape.
+ * @see {@link ListAppVersionsCommandOutput} for command's `response` shape.
+ * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape.
+ *
+ */
+export class ListAppVersionsCommand extends $Command<
+  ListAppVersionsCommandInput,
+  ListAppVersionsCommandOutput,
+  ResiliencehubClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: ListAppVersionsCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
+ * Lists your Resilience Hub applications.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { ResiliencehubClient, ListAppsCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import
+ * // const { ResiliencehubClient, ListAppsCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import
+ * const client = new ResiliencehubClient(config);
+ * const command = new ListAppsCommand(input);
+ * const response = await client.send(command);
+ * ```
+ *
+ * @see {@link ListAppsCommandInput} for command's `input` shape.
+ * @see {@link ListAppsCommandOutput} for command's `response` shape.
+ * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape.
+ *
+ */
+export class ListAppsCommand extends $Command<
+  ListAppsCommandInput,
+  ListAppsCommandOutput,
+  ResiliencehubClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: ListAppsCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
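A pagination sketch follows; the `appSummaries` and `nextToken` field names follow the `ListAppsCommandOutput` shape as I read it, and the region is a placeholder. The same token-driven loop applies to the other List* commands in this client.

```typescript
import { ResiliencehubClient, ListAppsCommand } from "@aws-sdk/client-resiliencehub";

const client = new ResiliencehubClient({ region: "us-east-1" });

// Standard nextToken pagination: keep requesting pages until the
// service stops returning a continuation token.
let nextToken: string | undefined;
do {
  const page = await client.send(new ListAppsCommand({ maxResults: 50, nextToken }));
  for (const app of page.appSummaries ?? []) {
    console.log(app.name, app.appArn);
  }
  nextToken = page.nextToken;
} while (nextToken);
```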
+ * Lists the recommendation templates for the Resilience Hub applications.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { ResiliencehubClient, ListRecommendationTemplatesCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import
+ * // const { ResiliencehubClient, ListRecommendationTemplatesCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import
+ * const client = new ResiliencehubClient(config);
+ * const command = new ListRecommendationTemplatesCommand(input);
+ * const response = await client.send(command);
+ * ```
+ *
+ * @see {@link ListRecommendationTemplatesCommandInput} for command's `input` shape.
+ * @see {@link ListRecommendationTemplatesCommandOutput} for command's `response` shape.
+ * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape.
+ *
+ */
+export class ListRecommendationTemplatesCommand extends $Command<
+  ListRecommendationTemplatesCommandInput,
+  ListRecommendationTemplatesCommandOutput,
+  ResiliencehubClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: ListRecommendationTemplatesCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
+ * Lists the resiliency policies for the Resilience Hub applications.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { ResiliencehubClient, ListResiliencyPoliciesCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import
+ * // const { ResiliencehubClient, ListResiliencyPoliciesCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import
+ * const client = new ResiliencehubClient(config);
+ * const command = new ListResiliencyPoliciesCommand(input);
+ * const response = await client.send(command);
+ * ```
+ *
+ * @see {@link ListResiliencyPoliciesCommandInput} for command's `input` shape.
+ * @see {@link ListResiliencyPoliciesCommandOutput} for command's `response` shape.
+ * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape.
+ *
+ */
+export class ListResiliencyPoliciesCommand extends $Command<
+  ListResiliencyPoliciesCommandInput,
+  ListResiliencyPoliciesCommandOutput,
+  ResiliencehubClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: ListResiliencyPoliciesCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
+ * Lists the standard operating procedure (SOP) recommendations for the Resilience Hub
+ * applications.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { ResiliencehubClient, ListSopRecommendationsCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import
+ * // const { ResiliencehubClient, ListSopRecommendationsCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import
+ * const client = new ResiliencehubClient(config);
+ * const command = new ListSopRecommendationsCommand(input);
+ * const response = await client.send(command);
+ * ```
+ *
+ * @see {@link ListSopRecommendationsCommandInput} for command's `input` shape.
+ * @see {@link ListSopRecommendationsCommandOutput} for command's `response` shape.
+ * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape.
+ *
+ */
+export class ListSopRecommendationsCommand extends $Command<
+  ListSopRecommendationsCommandInput,
+  ListSopRecommendationsCommandOutput,
+  ResiliencehubClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: ListSopRecommendationsCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
+ * Lists the suggested resiliency policies for the Resilience Hub applications.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { ResiliencehubClient, ListSuggestedResiliencyPoliciesCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import
+ * // const { ResiliencehubClient, ListSuggestedResiliencyPoliciesCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import
+ * const client = new ResiliencehubClient(config);
+ * const command = new ListSuggestedResiliencyPoliciesCommand(input);
+ * const response = await client.send(command);
+ * ```
+ *
+ * @see {@link ListSuggestedResiliencyPoliciesCommandInput} for command's `input` shape.
+ * @see {@link ListSuggestedResiliencyPoliciesCommandOutput} for command's `response` shape.
+ * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape.
+ *
+ */
+export class ListSuggestedResiliencyPoliciesCommand extends $Command<
+  ListSuggestedResiliencyPoliciesCommandInput,
+  ListSuggestedResiliencyPoliciesCommandOutput,
+  ResiliencehubClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: ListSuggestedResiliencyPoliciesCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
+ * Lists the tags for your resources in your Resilience Hub applications.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { ResiliencehubClient, ListTagsForResourceCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import
+ * // const { ResiliencehubClient, ListTagsForResourceCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import
+ * const client = new ResiliencehubClient(config);
+ * const command = new ListTagsForResourceCommand(input);
+ * const response = await client.send(command);
+ * ```
+ *
+ * @see {@link ListTagsForResourceCommandInput} for command's `input` shape.
+ * @see {@link ListTagsForResourceCommandOutput} for command's `response` shape.
+ * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape.
+ *
+ */
+export class ListTagsForResourceCommand extends $Command<
+  ListTagsForResourceCommandInput,
+  ListTagsForResourceCommandOutput,
+  ResiliencehubClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: ListTagsForResourceCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack
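A minimal tag-listing sketch follows; the ARN and region are placeholders, and the response's `tags` field is a plain string-to-string map per `ListTagsForResourceCommandOutput`.

```typescript
import { ResiliencehubClient, ListTagsForResourceCommand } from "@aws-sdk/client-resiliencehub";

const client = new ResiliencehubClient({ region: "us-east-1" });

const { tags } = await client.send(
  new ListTagsForResourceCommand({
    resourceArn: "arn:aws:resiliencehub:us-east-1:123456789012:app/example-id", // placeholder
  })
);
// Tags come back as a plain string-to-string map.
for (const [key, value] of Object.entries(tags ?? {})) {
  console.log(`${key}=${value}`);
}
```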
+ * Lists the test recommendations for the Resilience Hub application.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { ResiliencehubClient, ListTestRecommendationsCommand } from "@aws-sdk/client-resiliencehub"; // ES Modules import
+ * // const { ResiliencehubClient, ListTestRecommendationsCommand } = require("@aws-sdk/client-resiliencehub"); // CommonJS import
+ * const client = new ResiliencehubClient(config);
+ * const command = new ListTestRecommendationsCommand(input);
+ * const response = await client.send(command);
+ * ```
+ *
+ * @see {@link ListTestRecommendationsCommandInput} for command's `input` shape.
+ * @see {@link ListTestRecommendationsCommandOutput} for command's `response` shape.
+ * @see {@link ResiliencehubClientResolvedConfig | config} for ResiliencehubClient's `config` shape.
+ *
+ */
+export class ListTestRecommendationsCommand extends $Command<
+  ListTestRecommendationsCommandInput,
+  ListTestRecommendationsCommandOutput,
+  ResiliencehubClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: ListTestRecommendationsCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack